author     Yaron Yogev <yaronyogev@gmail.com>   2017-07-27 09:02:54 +0300
committer  Yaron Yogev <yaronyogev@gmail.com>   2017-07-27 14:56:25 +0300
commit     7e83d0876ddb84a45e130eeba28bc40ef53c074b (patch)
tree       47d76239ae7658d87c66abd142df92709427e7dd
parent     378ecbd8947589b9cbb39013a0c2e2aa201e03bd (diff)
Calipso initial release for OPNFV
Change-Id: I7210c244b0c10fa80bfa8c77cb86c9d6ddf8bc88
Signed-off-by: Yaron Yogev <yaronyogev@gmail.com>
-rw-r--r--LICENSE2
-rw-r--r--README.md50
-rw-r--r--app/api/__init__.py10
-rw-r--r--app/api/app.py71
-rw-r--r--app/api/auth/__init__.py10
-rw-r--r--app/api/auth/auth.py71
-rw-r--r--app/api/auth/token.py39
-rw-r--r--app/api/backends/__init__.py10
-rw-r--r--app/api/backends/ldap_access.py89
-rw-r--r--app/api/exceptions/__init__.py10
-rw-r--r--app/api/exceptions/exceptions.py26
-rw-r--r--app/api/middleware/__init__.py10
-rw-r--r--app/api/middleware/authentication.py63
-rw-r--r--app/api/responders/__init__.py10
-rw-r--r--app/api/responders/auth/__init__.py10
-rw-r--r--app/api/responders/auth/tokens.py117
-rw-r--r--app/api/responders/resource/__init__.py10
-rw-r--r--app/api/responders/resource/aggregates.py157
-rw-r--r--app/api/responders/resource/clique_constraints.py67
-rw-r--r--app/api/responders/resource/clique_types.py103
-rw-r--r--app/api/responders/resource/cliques.py73
-rw-r--r--app/api/responders/resource/constants.py30
-rw-r--r--app/api/responders/resource/environment_configs.py381
-rw-r--r--app/api/responders/resource/inventory.py65
-rw-r--r--app/api/responders/resource/links.py76
-rw-r--r--app/api/responders/resource/messages.py78
-rw-r--r--app/api/responders/resource/monitoring_config_templates.py65
-rw-r--r--app/api/responders/resource/scans.py111
-rw-r--r--app/api/responders/resource/scheduled_scans.py113
-rw-r--r--app/api/responders/responder_base.py223
-rwxr-xr-xapp/api/server.py74
-rw-r--r--app/api/validation/__init__.py10
-rw-r--r--app/api/validation/data_validate.py185
-rw-r--r--app/api/validation/regex.py57
-rw-r--r--app/config/events.json58
-rw-r--r--app/config/scanners.json370
-rw-r--r--app/discover/__init__.py10
-rw-r--r--app/discover/clique_finder.py174
-rw-r--r--app/discover/configuration.py70
-rw-r--r--app/discover/event_handler.py45
-rw-r--r--app/discover/event_manager.py265
-rw-r--r--app/discover/events/__init__.py10
-rw-r--r--app/discover/events/event_base.py36
-rw-r--r--app/discover/events/event_delete_base.py60
-rw-r--r--app/discover/events/event_instance_add.py45
-rw-r--r--app/discover/events/event_instance_delete.py18
-rw-r--r--app/discover/events/event_instance_update.py55
-rw-r--r--app/discover/events/event_interface_add.py139
-rw-r--r--app/discover/events/event_interface_delete.py40
-rw-r--r--app/discover/events/event_metadata_parser.py75
-rw-r--r--app/discover/events/event_network_add.py50
-rw-r--r--app/discover/events/event_network_delete.py17
-rw-r--r--app/discover/events/event_network_update.py44
-rw-r--r--app/discover/events/event_port_add.py309
-rw-r--r--app/discover/events/event_port_delete.py80
-rw-r--r--app/discover/events/event_port_update.py38
-rw-r--r--app/discover/events/event_router_add.py123
-rw-r--r--app/discover/events/event_router_delete.py37
-rw-r--r--app/discover/events/event_router_update.py82
-rw-r--r--app/discover/events/event_subnet_add.py154
-rw-r--r--app/discover/events/event_subnet_delete.py57
-rw-r--r--app/discover/events/event_subnet_update.py102
-rw-r--r--app/discover/events/listeners/__init__.py10
-rwxr-xr-xapp/discover/events/listeners/default_listener.py314
-rw-r--r--app/discover/events/listeners/listener_base.py18
-rw-r--r--app/discover/fetch_host_object_types.py37
-rw-r--r--app/discover/fetch_region_object_types.py37
-rw-r--r--app/discover/fetcher.py35
-rw-r--r--app/discover/fetcher_new.py30
-rw-r--r--app/discover/fetchers/__init__.py9
-rw-r--r--app/discover/fetchers/aci/__init__.py9
-rw-r--r--app/discover/fetchers/aci/aci_access.py200
-rw-r--r--app/discover/fetchers/aci/aci_fetch_switch_pnic.py91
-rw-r--r--app/discover/fetchers/api/__init__.py9
-rw-r--r--app/discover/fetchers/api/api_access.py195
-rw-r--r--app/discover/fetchers/api/api_fetch_availability_zones.py56
-rw-r--r--app/discover/fetchers/api/api_fetch_end_points.py35
-rw-r--r--app/discover/fetchers/api/api_fetch_host_instances.py59
-rw-r--r--app/discover/fetchers/api/api_fetch_network.py76
-rw-r--r--app/discover/fetchers/api/api_fetch_networks.py86
-rw-r--r--app/discover/fetchers/api/api_fetch_port.py60
-rw-r--r--app/discover/fetchers/api/api_fetch_ports.py55
-rw-r--r--app/discover/fetchers/api/api_fetch_project_hosts.py144
-rw-r--r--app/discover/fetchers/api/api_fetch_projects.py66
-rw-r--r--app/discover/fetchers/api/api_fetch_regions.py51
-rw-r--r--app/discover/fetchers/cli/__init__.py9
-rw-r--r--app/discover/fetchers/cli/cli_access.py206
-rw-r--r--app/discover/fetchers/cli/cli_fetch_host_pnics.py122
-rw-r--r--app/discover/fetchers/cli/cli_fetch_host_pnics_vpp.py44
-rw-r--r--app/discover/fetchers/cli/cli_fetch_host_vservice.py80
-rw-r--r--app/discover/fetchers/cli/cli_fetch_host_vservices.py27
-rw-r--r--app/discover/fetchers/cli/cli_fetch_instance_vnics.py22
-rw-r--r--app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py68
-rw-r--r--app/discover/fetchers/cli/cli_fetch_instance_vnics_vpp.py18
-rw-r--r--app/discover/fetchers/cli/cli_fetch_oteps_lxb.py86
-rw-r--r--app/discover/fetchers/cli/cli_fetch_vconnectors.py40
-rw-r--r--app/discover/fetchers/cli/cli_fetch_vconnectors_lxb.py35
-rw-r--r--app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py56
-rw-r--r--app/discover/fetchers/cli/cli_fetch_vconnectors_vpp.py64
-rw-r--r--app/discover/fetchers/cli/cli_fetch_vpp_vedges.py58
-rw-r--r--app/discover/fetchers/cli/cli_fetch_vservice_vnics.py140
-rw-r--r--app/discover/fetchers/db/__init__.py9
-rw-r--r--app/discover/fetchers/db/db_access.py142
-rw-r--r--app/discover/fetchers/db/db_fetch_aggregate_hosts.py36
-rw-r--r--app/discover/fetchers/db/db_fetch_aggregates.py21
-rw-r--r--app/discover/fetchers/db/db_fetch_availability_zones.py22
-rw-r--r--app/discover/fetchers/db/db_fetch_az_network_hosts.py31
-rw-r--r--app/discover/fetchers/db/db_fetch_host_instances.py15
-rw-r--r--app/discover/fetchers/db/db_fetch_host_network_agents.py35
-rw-r--r--app/discover/fetchers/db/db_fetch_instances.py60
-rw-r--r--app/discover/fetchers/db/db_fetch_oteps.py81
-rw-r--r--app/discover/fetchers/db/db_fetch_port.py34
-rw-r--r--app/discover/fetchers/db/db_fetch_vedges_ovs.py178
-rw-r--r--app/discover/fetchers/db/db_fetch_vedges_vpp.py56
-rw-r--r--app/discover/fetchers/folder_fetcher.py36
-rw-r--r--app/discover/find_links.py30
-rw-r--r--app/discover/find_links_for_instance_vnics.py59
-rw-r--r--app/discover/find_links_for_oteps.py85
-rw-r--r--app/discover/find_links_for_pnics.py58
-rw-r--r--app/discover/find_links_for_vconnectors.py88
-rw-r--r--app/discover/find_links_for_vedges.py124
-rw-r--r--app/discover/find_links_for_vservice_vnics.py56
-rw-r--r--app/discover/manager.py45
-rw-r--r--app/discover/monitoring_mgr.py10
-rw-r--r--app/discover/network_agents_list.py23
-rw-r--r--app/discover/plugins/__init__.py10
-rwxr-xr-xapp/discover/scan.py324
-rw-r--r--app/discover/scan_error.py11
-rw-r--r--app/discover/scan_manager.py294
-rw-r--r--app/discover/scan_metadata_parser.py202
-rw-r--r--app/discover/scanner.py253
-rw-r--r--app/install/calipso-installer.py380
-rw-r--r--app/install/calipso_mongo_access.conf.example4
-rw-r--r--app/install/db/attributes_for_hover_on_data.json89
-rw-r--r--app/install/db/clique_constraints.json20
-rw-r--r--app/install/db/clique_types.json56
-rw-r--r--app/install/db/cliques.json3
-rw-r--r--app/install/db/constants.json668
-rw-r--r--app/install/db/environments_config.json78
-rw-r--r--app/install/db/inventory.json3
-rw-r--r--app/install/db/link_types.json184
-rw-r--r--app/install/db/links.json3
-rw-r--r--app/install/db/messages.json44
-rw-r--r--app/install/db/meteor_accounts_loginServiceConfiguration.json3
-rw-r--r--app/install/db/monitoring_config.json3
-rw-r--r--app/install/db/monitoring_config_templates.json378
-rw-r--r--app/install/db/network_agent_types.json52
-rw-r--r--app/install/db/roles.json26
-rw-r--r--app/install/db/scans.json24
-rw-r--r--app/install/db/scheduled_scans.json43
-rw-r--r--app/install/db/statistics.json23
-rw-r--r--app/install/db/supported_environments.json230
-rw-r--r--app/install/db/users.json51
-rw-r--r--app/install/ldap.conf.example10
-rw-r--r--app/messages/message.py65
-rw-r--r--app/monitoring/__init__.py10
-rw-r--r--app/monitoring/checks/binary_converter.py17
-rwxr-xr-xapp/monitoring/checks/check_interface.py50
-rwxr-xr-xapp/monitoring/checks/check_ping.py121
-rwxr-xr-xapp/monitoring/checks/check_pnic_vpp.py53
-rwxr-xr-xapp/monitoring/checks/check_vedge_ovs.py43
-rwxr-xr-xapp/monitoring/checks/check_vedge_vpp.py50
-rwxr-xr-xapp/monitoring/checks/check_vnic_vconnector.py72
-rwxr-xr-xapp/monitoring/checks/check_vnic_vpp.py48
-rw-r--r--app/monitoring/checks/check_vservice.py82
-rw-r--r--app/monitoring/handlers/__init__.py10
-rw-r--r--app/monitoring/handlers/basic_check_handler.py25
-rw-r--r--app/monitoring/handlers/handle_link.py36
-rw-r--r--app/monitoring/handlers/handle_otep.py48
-rw-r--r--app/monitoring/handlers/handle_pnic.py29
-rw-r--r--app/monitoring/handlers/handle_pnic_vpp.py28
-rw-r--r--app/monitoring/handlers/handle_vnic_vpp.py28
-rwxr-xr-xapp/monitoring/handlers/monitor.py92
-rw-r--r--app/monitoring/handlers/monitoring_check_handler.py94
-rw-r--r--app/monitoring/setup/__init__.py10
-rw-r--r--app/monitoring/setup/monitoring_check_handler.py54
-rw-r--r--app/monitoring/setup/monitoring_handler.py485
-rw-r--r--app/monitoring/setup/monitoring_host.py91
-rw-r--r--app/monitoring/setup/monitoring_link_vnic_vconnector.py37
-rw-r--r--app/monitoring/setup/monitoring_otep.py34
-rw-r--r--app/monitoring/setup/monitoring_pnic.py21
-rw-r--r--app/monitoring/setup/monitoring_setup_manager.py84
-rw-r--r--app/monitoring/setup/monitoring_simple_object.py25
-rw-r--r--app/monitoring/setup/monitoring_vedge.py19
-rw-r--r--app/monitoring/setup/monitoring_vnic.py20
-rw-r--r--app/monitoring/setup/monitoring_vservice.py23
-rwxr-xr-xapp/statistics/stats_consumer.py134
-rw-r--r--app/test/__init__.py10
-rw-r--r--app/test/api/__init__.py10
-rw-r--r--app/test/api/responders_test/__init__.py10
-rw-r--r--app/test/api/responders_test/auth/__init__.py10
-rw-r--r--app/test/api/responders_test/auth/test_tokens.py105
-rw-r--r--app/test/api/responders_test/resource/__init__.py10
-rw-r--r--app/test/api/responders_test/resource/test_aggregates.py103
-rw-r--r--app/test/api/responders_test/resource/test_clique_constraints.py138
-rw-r--r--app/test/api/responders_test/resource/test_clique_types.py267
-rw-r--r--app/test/api/responders_test/resource/test_cliques.py240
-rw-r--r--app/test/api/responders_test/resource/test_constants.py53
-rw-r--r--app/test/api/responders_test/resource/test_environment_configs.py420
-rw-r--r--app/test/api/responders_test/resource/test_inventory.py162
-rw-r--r--app/test/api/responders_test/resource/test_links.py193
-rw-r--r--app/test/api/responders_test/resource/test_messages.py236
-rw-r--r--app/test/api/responders_test/resource/test_monitoring_config_templates.py156
-rw-r--r--app/test/api/responders_test/resource/test_scans.py239
-rw-r--r--app/test/api/responders_test/resource/test_scheduled_scans.py247
-rw-r--r--app/test/api/responders_test/test_data/__init__.py10
-rw-r--r--app/test/api/responders_test/test_data/aggregates.py67
-rw-r--r--app/test/api/responders_test/test_data/base.py179
-rw-r--r--app/test/api/responders_test/test_data/clique_constraints.py74
-rw-r--r--app/test/api/responders_test/test_data/clique_types.py170
-rw-r--r--app/test/api/responders_test/test_data/cliques.py171
-rw-r--r--app/test/api/responders_test/test_data/constants.py23
-rw-r--r--app/test/api/responders_test/test_data/environment_configs.py221
-rw-r--r--app/test/api/responders_test/test_data/inventory.py37
-rw-r--r--app/test/api/responders_test/test_data/links.py90
-rw-r--r--app/test/api/responders_test/test_data/messages.py108
-rw-r--r--app/test/api/responders_test/test_data/monitoring_config_templates.py98
-rw-r--r--app/test/api/responders_test/test_data/scans.py187
-rw-r--r--app/test/api/responders_test/test_data/scheduled_scans.py138
-rw-r--r--app/test/api/responders_test/test_data/tokens.py83
-rw-r--r--app/test/api/test_base.py101
-rw-r--r--app/test/event_based_scan/__init__.py10
-rw-r--r--app/test/event_based_scan/config/__init__.py10
-rw-r--r--app/test/event_based_scan/config/test_config.py17
-rw-r--r--app/test/event_based_scan/test_data/__init__.py10
-rw-r--r--app/test/event_based_scan/test_data/event_payload_instance_add.py122
-rw-r--r--app/test/event_based_scan/test_data/event_payload_instance_delete.py97
-rw-r--r--app/test/event_based_scan/test_data/event_payload_instance_update.py99
-rw-r--r--app/test/event_based_scan/test_data/event_payload_interface_add.py350
-rw-r--r--app/test/event_based_scan/test_data/event_payload_interface_delete.py350
-rw-r--r--app/test/event_based_scan/test_data/event_payload_network_add.py32
-rw-r--r--app/test/event_based_scan/test_data/event_payload_network_delete.py88
-rw-r--r--app/test/event_based_scan/test_data/event_payload_network_update.py65
-rw-r--r--app/test/event_based_scan/test_data/event_payload_port_add.py314
-rw-r--r--app/test/event_based_scan/test_data/event_payload_port_delete.py290
-rw-r--r--app/test/event_based_scan/test_data/event_payload_port_update.py103
-rw-r--r--app/test/event_based_scan/test_data/event_payload_router_add.py176
-rw-r--r--app/test/event_based_scan/test_data/event_payload_router_delete.py59
-rw-r--r--app/test/event_based_scan/test_data/event_payload_router_update.py271
-rw-r--r--app/test/event_based_scan/test_data/event_payload_subnet_add.py124
-rw-r--r--app/test/event_based_scan/test_data/event_payload_subnet_delete.py95
-rw-r--r--app/test/event_based_scan/test_data/event_payload_subnet_update.py76
-rw-r--r--app/test/event_based_scan/test_event.py55
-rw-r--r--app/test/event_based_scan/test_event_delete_base.py64
-rw-r--r--app/test/event_based_scan/test_instance_add.py61
-rw-r--r--app/test/event_based_scan/test_instance_delete.py24
-rw-r--r--app/test/event_based_scan/test_instance_update.py46
-rw-r--r--app/test/event_based_scan/test_interface_add.py74
-rw-r--r--app/test/event_based_scan/test_interface_delete.py44
-rw-r--r--app/test/event_based_scan/test_network_add.py47
-rw-r--r--app/test/event_based_scan/test_network_delete.py24
-rw-r--r--app/test/event_based_scan/test_network_update.py33
-rw-r--r--app/test/event_based_scan/test_port_add.py75
-rw-r--r--app/test/event_based_scan/test_port_delete.py47
-rw-r--r--app/test/event_based_scan/test_port_update.py34
-rw-r--r--app/test/event_based_scan/test_router_add.py79
-rw-r--r--app/test/event_based_scan/test_router_delete.py23
-rw-r--r--app/test/event_based_scan/test_router_update.py62
-rw-r--r--app/test/event_based_scan/test_subnet_add.py68
-rw-r--r--app/test/event_based_scan/test_subnet_delete.py54
-rw-r--r--app/test/event_based_scan/test_subnet_update.py45
-rw-r--r--app/test/fetch/__init__.py9
-rw-r--r--app/test/fetch/api_fetch/__init__.py9
-rw-r--r--app/test/fetch/api_fetch/test_api_access.py142
-rw-r--r--app/test/fetch/api_fetch/test_api_fetch_availability_zone.py72
-rw-r--r--app/test/fetch/api_fetch/test_api_fetch_host_instances.py83
-rw-r--r--app/test/fetch/api_fetch/test_api_fetch_networks.py65
-rw-r--r--app/test/fetch/api_fetch/test_api_fetch_ports.py89
-rw-r--r--app/test/fetch/api_fetch/test_api_fetch_project_hosts.py137
-rw-r--r--app/test/fetch/api_fetch/test_api_fetch_projects.py120
-rw-r--r--app/test/fetch/api_fetch/test_api_fetch_regions.py41
-rw-r--r--app/test/fetch/api_fetch/test_data/__init__.py9
-rw-r--r--app/test/fetch/api_fetch/test_data/api_access.py55
-rw-r--r--app/test/fetch/api_fetch/test_data/api_fetch_availability_zones.py71
-rw-r--r--app/test/fetch/api_fetch/test_data/api_fetch_host_instances.py85
-rw-r--r--app/test/fetch/api_fetch/test_data/api_fetch_host_project_hosts.py225
-rw-r--r--app/test/fetch/api_fetch/test_data/api_fetch_networks.py72
-rw-r--r--app/test/fetch/api_fetch/test_data/api_fetch_ports.py72
-rw-r--r--app/test/fetch/api_fetch/test_data/api_fetch_projects.py88
-rw-r--r--app/test/fetch/api_fetch/test_data/api_fetch_regions.py50
-rw-r--r--app/test/fetch/api_fetch/test_data/configurations.py52
-rw-r--r--app/test/fetch/api_fetch/test_data/regions.py110
-rw-r--r--app/test/fetch/api_fetch/test_data/token.py23
-rw-r--r--app/test/fetch/cli_fetch/__init__.py9
-rw-r--r--app/test/fetch/cli_fetch/test_cli_access.py159
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_host_pnics.py135
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_host_pnics_vpp.py34
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_host_vservices.py132
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_instance_vnics.py111
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_instance_vnics_ovs.py36
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_instance_vnics_vpp.py23
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_vconnectors.py66
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_vconnectors_ovs.py38
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_vconnectors_vpp.py50
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_vservice_vnics.py124
-rw-r--r--app/test/fetch/cli_fetch/test_data/__init__.py9
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_access.py58
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_fetch_host_pnics.py147
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_fetch_host_pnics_vpp.py204
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_fetch_host_verservices.py276
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_fetch_instance_vnics.py288
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_fetch_vconnectors.py103
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_fetch_vconnectors_ovs.py234
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_fetch_vconnectors_vpp.py137
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_fetch_vservice_vnics.py616
-rw-r--r--app/test/fetch/config/__init__.py9
-rw-r--r--app/test/fetch/config/test_config.py17
-rw-r--r--app/test/fetch/db_fetch/__init__.py9
-rw-r--r--app/test/fetch/db_fetch/mock_cursor.py25
-rw-r--r--app/test/fetch/db_fetch/test_data/__init__.py9
-rw-r--r--app/test/fetch/db_fetch/test_data/db_access.py40
-rw-r--r--app/test/fetch/db_fetch/test_data/db_fetch_aggregate_hosts.py34
-rw-r--r--app/test/fetch/db_fetch/test_data/db_fetch_aggregates.py17
-rw-r--r--app/test/fetch/db_fetch/test_data/db_fetch_host_network_agents.py65
-rw-r--r--app/test/fetch/db_fetch/test_data/db_fetch_instances.py91
-rw-r--r--app/test/fetch/db_fetch/test_data/db_fetch_oteps.py131
-rw-r--r--app/test/fetch/db_fetch/test_data/db_fetch_vedges_ovs.py168
-rw-r--r--app/test/fetch/db_fetch/test_data/db_fetch_vedges_vpp.py89
-rw-r--r--app/test/fetch/db_fetch/test_db_access.py108
-rw-r--r--app/test/fetch/db_fetch/test_db_fetch_aggregate_hosts.py60
-rw-r--r--app/test/fetch/db_fetch/test_db_fetch_aggregates.py26
-rw-r--r--app/test/fetch/db_fetch/test_db_fetch_instances.py37
-rw-r--r--app/test/fetch/db_fetch/test_db_fetch_oteps.py92
-rw-r--r--app/test/fetch/db_fetch/test_db_fetch_vedges_ovs.py109
-rw-r--r--app/test/fetch/db_fetch/test_db_fetch_vedges_vpp.py82
-rw-r--r--app/test/fetch/db_fetch/test_fetch_host_network_agents.py66
-rw-r--r--app/test/fetch/test_fetch.py46
-rw-r--r--app/test/scan/__init__.py10
-rw-r--r--app/test/scan/config/__init__.py9
-rw-r--r--app/test/scan/config/test_config.py17
-rw-r--r--app/test/scan/main.py17
-rw-r--r--app/test/scan/mock_module.py37
-rw-r--r--app/test/scan/test_data/__init__.py9
-rw-r--r--app/test/scan/test_data/configurations.py69
-rw-r--r--app/test/scan/test_data/metadata.py318
-rw-r--r--app/test/scan/test_data/scan.py435
-rw-r--r--app/test/scan/test_data/scanner.py355
-rw-r--r--app/test/scan/test_scan.py46
-rw-r--r--app/test/scan/test_scan_controller.py215
-rw-r--r--app/test/scan/test_scan_metadata_parser.py152
-rw-r--r--app/test/scan/test_scanner.py355
-rw-r--r--app/test/test_suite.py25
-rw-r--r--app/utils/__init__.py10
-rw-r--r--app/utils/binary_converter.py27
-rw-r--r--app/utils/config_file.py48
-rw-r--r--app/utils/constants.py37
-rw-r--r--app/utils/deep_merge.py77
-rw-r--r--app/utils/dict_naming_converter.py40
-rw-r--r--app/utils/exceptions.py13
-rw-r--r--app/utils/inventory_mgr.py445
-rw-r--r--app/utils/logging/__init__.py10
-rw-r--r--app/utils/logging/console_logger.py21
-rw-r--r--app/utils/logging/file_logger.py23
-rw-r--r--app/utils/logging/full_logger.py47
-rw-r--r--app/utils/logging/logger.py99
-rw-r--r--app/utils/logging/message_logger.py21
-rw-r--r--app/utils/logging/mongo_logging_handler.py53
-rw-r--r--app/utils/metadata_parser.py83
-rw-r--r--app/utils/mongo_access.py137
-rw-r--r--app/utils/singleton.py16
-rw-r--r--app/utils/special_char_converter.py32
-rw-r--r--app/utils/ssh_conn.py94
-rw-r--r--app/utils/ssh_connection.py217
-rw-r--r--app/utils/string_utils.py59
-rw-r--r--app/utils/util.py172
365 files changed, 32568 insertions, 0 deletions
diff --git a/LICENSE b/LICENSE
index f4346f8..cf46283 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,3 +1,5 @@
+Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)
+and others
Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..2a9e986
--- /dev/null
+++ b/README.md
@@ -0,0 +1,50 @@
+This work is licensed under a Creative Commons Attribution 4.0
+International License.
+http://creativecommons.org/licenses/by/4.0
+
+Calipso - OpenStack Network Discovery and Assurance
+==================================================
+### About
+We are going to enhance the way Cloud Network Administrators (CNA) and Tenant Network Administrators (TNA)
+understand, monitor and troubleshoot highly distributed OpenStack and other virtual environments.
+
+We are following the Domain-Driven Design process and procedures:
+ref: http://www.methodsandtools.com/archive/archive.php?id=97
+<br>
+WIKI Central:<br>
+http://wikicentral.cisco.com/display/OSDNA/Home
+<br>
+JIVE:<br>
+https://cisco.jiveon.com/people/korlev/blog/2015/12/13/os-dna-project
+<br>
+Rally:<br>
+https://rally1.rallydev.com/#/48530212030d/dashboard
+<br>
+
+### Prototype Intent:
+
+Provide CNA and TNA with support for:
+<br>
+1. Building a virtual network inventory and visualizing all inter-connections in real time
+<br>
+2. Monitoring the state and health of virtual network objects
+<br>
+3. Troubleshooting failures in virtual networks
+<br>
+4. Assessing the impact of failures in virtual networks
+<br>
+
+### High Level Functionality Tree:
+<br>
+http://korlev-calipso.cisco.com/ <br>
+Code: https://cto-github.cisco.com/korlev/Calipso
+
+### Proto (mockups, updated Nov 15th)
+korlev-calipso-be.cisco.com:8081
+
+#### High Level Architecture: <br>
+see under /Architecture (outdated)
+
+### Contacts
+* Koren Lev (korlev)
+* Yaron Yogev (yayogev)
diff --git a/app/api/__init__.py b/app/api/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/app.py b/app/api/app.py
new file mode 100644
index 0000000..5fa3da9
--- /dev/null
+++ b/app/api/app.py
@@ -0,0 +1,71 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import importlib
+
+import falcon
+
+from api.auth.token import Token
+from api.backends.ldap_access import LDAPAccess
+from api.exceptions.exceptions import CalipsoApiException
+from api.middleware.authentication import AuthenticationMiddleware
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+from utils.mongo_access import MongoAccess
+
+
+class App:
+
+ ROUTE_DECLARATIONS = {
+ "/inventory": "resource.inventory.Inventory",
+ "/links": "resource.links.Links",
+ "/messages": "resource.messages.Messages",
+ "/cliques": "resource.cliques.Cliques",
+ "/clique_types": "resource.clique_types.CliqueTypes",
+ "/clique_constraints": "resource.clique_constraints.CliqueConstraints",
+ "/scans": "resource.scans.Scans",
+ "/scheduled_scans": "resource.scheduled_scans.ScheduledScans",
+ "/constants": "resource.constants.Constants",
+ "/monitoring_config_templates":
+ "resource.monitoring_config_templates.MonitoringConfigTemplates",
+ "/aggregates": "resource.aggregates.Aggregates",
+ "/environment_configs":
+ "resource.environment_configs.EnvironmentConfigs",
+ "/auth/tokens": "auth.tokens.Tokens"
+ }
+
+ responders_path = "api.responders"
+
+ def __init__(self, mongo_config="", ldap_config="",
+ log_level="", inventory="", token_lifetime=86400):
+ MongoAccess.set_config_file(mongo_config)
+ self.inv = InventoryMgr()
+ self.inv.set_collections(inventory)
+ self.log = FullLogger()
+ self.log.set_loglevel(log_level)
+ self.ldap_access = LDAPAccess(ldap_config)
+ Token.set_token_lifetime(token_lifetime)
+ self.middleware = AuthenticationMiddleware()
+ self.app = falcon.API(middleware=[self.middleware])
+ self.app.add_error_handler(CalipsoApiException)
+ self.set_routes(self.app)
+
+ def get_app(self):
+ return self.app
+
+ def set_routes(self, app):
+ for url in self.ROUTE_DECLARATIONS.keys():
+ class_path = self.ROUTE_DECLARATIONS.get(url)
+ module = self.responders_path + "." + \
+ class_path[:class_path.rindex(".")]
+ class_name = class_path.split('.')[-1]
+ module = importlib.import_module(module)
+ class_ = getattr(module, class_name)
+ resource = class_()
+ app.add_route(url, resource)
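
A minimal sketch of how this App class could be served locally, assuming Python's built-in wsgiref server; the config-file paths, port and collection name below are illustrative only (the packaged entry point is app/api/server.py):

    # Hypothetical local runner; config paths, port and log level are assumptions.
    from wsgiref.simple_server import make_server

    from api.app import App

    application = App(mongo_config="calipso_mongo_access.conf",
                      ldap_config="ldap.conf",
                      log_level="INFO",
                      inventory="inventory").get_app()

    httpd = make_server("", 8000, application)
    httpd.serve_forever()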
diff --git a/app/api/auth/__init__.py b/app/api/auth/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/auth/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/auth/auth.py b/app/api/auth/auth.py
new file mode 100644
index 0000000..04fc4b9
--- /dev/null
+++ b/app/api/auth/auth.py
@@ -0,0 +1,71 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.auth.token import Token
+from api.backends.ldap_access import LDAPAccess
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+
+
+class Auth:
+
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.log = FullLogger()
+ self.tokens_coll = self.inv.client['tokens']['api_tokens']
+ self.ldap_access = LDAPAccess()
+
+ def get_token(self, token):
+ tokens = None
+ try:
+ tokens = list(self.tokens_coll.find({'token': token}))
+ except Exception as e:
+            self.log.error('Failed to get token: {0}'.format(str(e)))
+
+ return tokens
+
+ def write_token(self, token):
+ error = None
+ try:
+ self.tokens_coll.insert_one(token)
+ except Exception as e:
+ self.log.error("Failed to write new token {0} to database for {1}"
+                           .format(token['token'], str(e)))
+ error = 'Failed to create new token'
+
+ return error
+
+ def delete_token(self, token):
+ error = None
+ try:
+ self.tokens_coll.delete_one({'token': token})
+ except Exception as e:
+ self.log.error('Failed to delete token {0} for {1}'.
+ format(token, str(e)))
+ error = 'Failed to delete token {0}'.format(token)
+
+ return error
+
+ def validate_credentials(self, username, pwd):
+ return self.ldap_access.authenticate_user(username, pwd)
+
+ def validate_token(self, token):
+ error = None
+ tokens = self.get_token(token)
+ if not tokens:
+ error = "Token {0} doesn't exist".format(token)
+ elif len(tokens) > 1:
+ self.log.error('Multiple tokens found for {0}'.format(token))
+ error = "Multiple tokens found"
+ else:
+ t = tokens[0]
+ error = Token.validate_token(t)
+
+ return error
diff --git a/app/api/auth/token.py b/app/api/auth/token.py
new file mode 100644
index 0000000..d057d22
--- /dev/null
+++ b/app/api/auth/token.py
@@ -0,0 +1,39 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import datetime
+import uuid
+
+
+class Token:
+ token_lifetime = 86400
+ FIELD = 'X-AUTH-TOKEN'
+
+ @classmethod
+ def set_token_lifetime(cls, lifetime):
+ Token.token_lifetime = lifetime
+
+ @classmethod
+ def new_uuid_token(cls, method):
+ token = {}
+ token['issued_at'] = datetime.datetime.now()
+ token['expires_at'] = token['issued_at'] +\
+ datetime.timedelta(seconds=Token.token_lifetime)
+ token['token'] = uuid.uuid4().hex
+ token['method'] = method
+ return token
+
+ @classmethod
+ def validate_token(cls, token):
+ error = None
+ now = datetime.datetime.now()
+ if now > token['expires_at']:
+ error = 'Token {0} has expired'.format(token['token'])
+
+ return error
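
To illustrate the Token helper above (values shown are hypothetical; new_uuid_token returns a dict holding the uuid4 hex string plus issue and expiry timestamps):

    from api.auth.token import Token

    Token.set_token_lifetime(3600)                    # e.g. shorten the lifetime to one hour
    token = Token.new_uuid_token(method='credentials')
    # token -> {'issued_at': ..., 'expires_at': ..., 'token': '<uuid4 hex>', 'method': 'credentials'}
    assert Token.validate_token(token) is None        # None until 'expires_at' has passed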
diff --git a/app/api/backends/__init__.py b/app/api/backends/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/backends/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/backends/ldap_access.py b/app/api/backends/ldap_access.py
new file mode 100644
index 0000000..a998656
--- /dev/null
+++ b/app/api/backends/ldap_access.py
@@ -0,0 +1,89 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import ssl
+
+from ldap3 import Server, Connection, Tls
+
+from utils.config_file import ConfigFile
+from utils.logging.full_logger import FullLogger
+from utils.singleton import Singleton
+
+
+class LDAPAccess(metaclass=Singleton):
+
+ default_config_file = "ldap.conf"
+ TLS_REQUEST_CERTS = {
+ "demand": ssl.CERT_REQUIRED,
+ "allow": ssl.CERT_OPTIONAL,
+ "never": ssl.CERT_NONE,
+ "default": ssl.CERT_NONE
+ }
+ user_ssl = True
+
+ def __init__(self, config_file_path=""):
+ super().__init__()
+ self.log = FullLogger()
+ self.ldap_params = self.get_ldap_params(config_file_path)
+ self.server = self.connect_ldap_server()
+
+ def get_ldap_params(self, config_file_path):
+ ldap_params = {
+ "url": "ldap://localhost:389"
+ }
+ if not config_file_path:
+ config_file_path = ConfigFile.get(self.default_config_file)
+ if config_file_path:
+ try:
+ config_file = ConfigFile(config_file_path)
+ params = config_file.read_config()
+ ldap_params.update(params)
+ except Exception as e:
+ self.log.error(str(e))
+ raise
+ if "user_tree_dn" not in ldap_params:
+ raise ValueError("user_tree_dn must be specified in " +
+ config_file_path)
+ if "user_id_attribute" not in ldap_params:
+ raise ValueError("user_id_attribute must be specified in " +
+ config_file_path)
+ return ldap_params
+
+ def connect_ldap_server(self):
+ ca_certificate_file = self.ldap_params.get('tls_cacertfile')
+ req_cert = self.ldap_params.get('tls_req_cert')
+ ldap_url = self.ldap_params.get('url')
+
+ if ca_certificate_file:
+ if not req_cert or req_cert not in self.TLS_REQUEST_CERTS.keys():
+ req_cert = 'default'
+ tls_req_cert = self.TLS_REQUEST_CERTS[req_cert]
+ tls = Tls(local_certificate_file=ca_certificate_file,
+ validate=tls_req_cert)
+ return Server(ldap_url, use_ssl=self.user_ssl, tls=tls)
+
+ return Server(ldap_url, use_ssl=self.user_ssl)
+
+ def authenticate_user(self, username, pwd):
+ if not self.server:
+ self.server = self.connect_ldap_server()
+
+        user_dn = self.ldap_params['user_id_attribute'] + "=" + \
+            username + "," + self.ldap_params['user_tree_dn']
+ connection = Connection(self.server, user=user_dn, password=pwd)
+ # validate the user by binding
+ # bound is true if binding succeed, otherwise false
+ bound = False
+ try:
+ bound = connection.bind()
+ connection.unbind()
+ except Exception as e:
+ self.log.error('Failed to bind the server for {0}'.format(str(e)))
+
+ return bound
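
The configuration consumed by get_ldap_params above reduces to a dict along these lines; the values here are illustrative only (app/install/ldap.conf.example ships the real template), and only user_tree_dn and user_id_attribute are mandatory:

    # Illustrative parameter set; url falls back to ldap://localhost:389,
    # the TLS keys are optional, tls_req_cert must be one of TLS_REQUEST_CERTS.
    ldap_params = {
        "url": "ldaps://ldap.example.com:636",
        "user_tree_dn": "ou=people,dc=example,dc=com",
        "user_id_attribute": "uid",
        "tls_cacertfile": "/etc/ssl/certs/ca.pem",
        "tls_req_cert": "demand",
    }
    # authenticate_user("jdoe", pwd) would then bind as:
    #   uid=jdoe,ou=people,dc=example,dc=com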
diff --git a/app/api/exceptions/__init__.py b/app/api/exceptions/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/exceptions/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/exceptions/exceptions.py b/app/api/exceptions/exceptions.py
new file mode 100644
index 0000000..f0a1d9f
--- /dev/null
+++ b/app/api/exceptions/exceptions.py
@@ -0,0 +1,26 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from utils.logging.console_logger import ConsoleLogger
+
+
+class CalipsoApiException(Exception):
+ log = ConsoleLogger()
+
+ def __init__(self, status, body="", message=""):
+ super().__init__(message)
+ self.message = message
+ self.status = status
+ self.body = body
+
+ @staticmethod
+ def handle(ex, req, resp, params):
+ CalipsoApiException.log.error(ex.message)
+ resp.status = ex.status
+ resp.body = ex.body
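
The App constructor registers this class with falcon's add_error_handler (see app.py above), so a responder can abort a request by raising it; a sketch with assumed status and body values:

    import falcon
    from api.exceptions.exceptions import CalipsoApiException

    # Example only: status follows falcon's constants, body is the JSON payload
    # that handle() will copy onto the response.
    raise CalipsoApiException(status=falcon.HTTP_400,
                              body='{"error": "env_name must be specified"}',
                              message="env_name must be specified")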
diff --git a/app/api/middleware/__init__.py b/app/api/middleware/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/middleware/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/middleware/authentication.py b/app/api/middleware/authentication.py
new file mode 100644
index 0000000..bc62fa8
--- /dev/null
+++ b/app/api/middleware/authentication.py
@@ -0,0 +1,63 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import base64
+
+from api.responders.responder_base import ResponderBase
+from api.auth.auth import Auth
+from api.auth.token import Token
+
+
+class AuthenticationMiddleware(ResponderBase):
+ def __init__(self):
+ super().__init__()
+ self.auth = Auth()
+ self.BASIC_AUTH = "AUTHORIZATION"
+ self.EXCEPTION_ROUTES = ['/auth/tokens']
+
+ def process_request(self, req, resp):
+ if req.path in self.EXCEPTION_ROUTES:
+ return
+
+ self.log.debug("Authentication middleware is processing the request")
+ headers = self.change_dict_naming_convention(req.headers,
+ lambda s: s.upper())
+ auth_error = None
+ if self.BASIC_AUTH in headers:
+ # basic authentication
+ self.log.debug("Authenticating the basic credentials")
+ basic = headers[self.BASIC_AUTH]
+ auth_error = self.authenticate_with_basic_auth(basic)
+ elif Token.FIELD in headers:
+ # token authentication
+ self.log.debug("Authenticating token")
+ token = headers[Token.FIELD]
+ auth_error = self.auth.validate_token(token)
+ else:
+ auth_error = "Authentication required"
+
+ if auth_error:
+ self.unauthorized(auth_error)
+
+ def authenticate_with_basic_auth(self, basic):
+ error = None
+ if not basic or not basic.startswith("Basic"):
+ error = "Credentials not provided"
+ else:
+ # get username and password
+ credential = basic.lstrip("Basic").lstrip()
+ username_password = base64.b64decode(credential).decode("utf-8")
+ credentials = username_password.split(":")
+ if not self.auth.validate_credentials(credentials[0], credentials[1]):
+ self.log.info("Authentication for {0} failed".format(credentials[0]))
+ error = "Authentication failed"
+ else:
+ self.log.info("Authentication for {0} succeeded".format(credentials[0]))
+
+ return error
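
For reference, the two header forms this middleware accepts are the standard Basic scheme and the token issued by /auth/tokens; a hypothetical client-side construction:

    import base64

    username, password = "admin", "secret"            # hypothetical credentials
    credential = base64.b64encode(
        "{0}:{1}".format(username, password).encode("utf-8")).decode("utf-8")
    headers = {"Authorization": "Basic " + credential}

    # ...or, once a token has been obtained from /auth/tokens:
    # headers = {"X-AUTH-TOKEN": token}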
diff --git a/app/api/responders/__init__.py b/app/api/responders/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/responders/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/responders/auth/__init__.py b/app/api/responders/auth/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/responders/auth/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/responders/auth/tokens.py b/app/api/responders/auth/tokens.py
new file mode 100644
index 0000000..0b3a22f
--- /dev/null
+++ b/app/api/responders/auth/tokens.py
@@ -0,0 +1,117 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from datetime import datetime
+
+from bson.objectid import ObjectId
+
+from api.auth.auth import Auth
+from api.auth.token import Token
+from api.responders.responder_base import ResponderBase
+from api.validation.data_validate import DataValidate
+from utils.string_utils import stringify_object_values_by_types
+
+
+class Tokens(ResponderBase):
+
+ def __init__(self):
+ super().__init__()
+ self.auth_requirements = {
+ 'methods': self.require(list, False,
+ DataValidate.LIST,
+ ['credentials', 'token'],
+ True),
+ 'credentials': self.require(dict, True),
+ 'token': self.require(str)
+ }
+
+ self.credential_requirements = {
+ 'username': self.require(str, mandatory=True),
+ 'password': self.require(str, mandatory=True)
+ }
+ self.auth = Auth()
+
+ def on_post(self, req, resp):
+ self.log.debug('creating new token')
+ error, data = self.get_content_from_request(req)
+ if error:
+ self.bad_request(error)
+
+ if 'auth' not in data:
+ self.bad_request('Request must contain auth object')
+
+ auth = data['auth']
+
+ self.validate_query_data(auth, self.auth_requirements)
+
+ if 'credentials' in auth:
+ self.validate_query_data(auth['credentials'],
+ self.credential_requirements)
+
+ auth_error = self.authenticate(auth)
+ if auth_error:
+ self.unauthorized(auth_error)
+
+ new_token = Token.new_uuid_token(auth['method'])
+ write_error = self.auth.write_token(new_token)
+
+ if write_error:
+            # TODO: if writing the token to the database failed, what kind of error should be returned?
+ self.bad_request(write_error)
+
+ stringify_object_values_by_types(new_token, [datetime, ObjectId])
+ self.set_successful_response(resp, new_token, '201')
+
+ def authenticate(self, auth):
+ error = None
+ methods = auth['methods']
+ credentials = auth.get('credentials')
+ token = auth.get('token')
+
+ if not token and not credentials:
+ return 'must provide credentials or token'
+
+ if 'credentials' in methods:
+ if not credentials:
+                return 'credentials must be provided for credentials method'
+ else:
+ if not self.auth.validate_credentials(credentials['username'],
+ credentials['password']):
+ error = 'authentication failed'
+ else:
+ auth['method'] = "credentials"
+ return None
+
+ if 'token' in methods:
+ if not token:
+ return 'token must be provided for token method'
+ else:
+ error = self.auth.validate_token(token)
+ if not error:
+ auth['method'] = 'token'
+
+ return error
+
+ def on_delete(self, req, resp):
+ headers = self.change_dict_naming_convention(req.headers,
+ lambda s: s.upper())
+ if Token.FIELD not in headers:
+ self.unauthorized('Authentication failed')
+
+ token = headers[Token.FIELD]
+ error = self.auth.validate_token(token)
+ if error:
+ self.unauthorized(error)
+
+ delete_error = self.auth.delete_token(token)
+
+ if delete_error:
+ self.bad_request(delete_error)
+
+ self.set_successful_response(resp)
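
An illustrative call against this responder using the requests library (host and port are assumptions; the payload shape follows auth_requirements and credential_requirements above):

    import requests

    payload = {"auth": {"methods": ["credentials"],
                        "credentials": {"username": "admin",
                                        "password": "secret"}}}
    resp = requests.post("http://localhost:8000/auth/tokens", json=payload)
    # 201 -> {"issued_at": "...", "expires_at": "...",
    #         "token": "<uuid4 hex>", "method": "credentials"}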
diff --git a/app/api/responders/resource/__init__.py b/app/api/responders/resource/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/responders/resource/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/responders/resource/aggregates.py b/app/api/responders/resource/aggregates.py
new file mode 100644
index 0000000..36fcfa4
--- /dev/null
+++ b/app/api/responders/resource/aggregates.py
@@ -0,0 +1,157 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.responders.responder_base import ResponderBase
+from api.validation.data_validate import DataValidate
+
+
+class Aggregates(ResponderBase):
+ def __init__(self):
+ super().__init__()
+ self.AGGREGATE_TYPES = ["environment", "message", "constant"]
+ self.AGGREGATES_MAP = {
+ "environment": self.get_environments_aggregates,
+ "message": self.get_messages_aggregates,
+ "constant": self.get_constants_aggregates
+ }
+
+ def on_get(self, req, resp):
+ self.log.debug("Getting aggregates information")
+
+ filters = self.parse_query_params(req)
+ filters_requirements = {
+ "env_name": self.require(str),
+ "type": self.require(str, validate=DataValidate.LIST,
+ requirement=self.AGGREGATE_TYPES,
+ mandatory=True,
+ error_messages={"mandatory":
+ "type must be specified: " +
+ "environment/" +
+ " message/" +
+ "constant"})
+ }
+ self.validate_query_data(filters, filters_requirements)
+ query = self.build_query(filters)
+ query_type = query["type"]
+ if query_type == "environment":
+ env_name = query.get("env_name")
+ if not env_name:
+ self.bad_request("env_name must be specified")
+ if not self.check_environment_name(env_name):
+ self.bad_request("unknown environment: " + env_name)
+
+ aggregates = self.AGGREGATES_MAP[query_type](query)
+ self.set_successful_response(resp, aggregates)
+
+ def build_query(self, filters):
+ query = {}
+ env_name = filters.get("env_name")
+ query_type = filters["type"]
+ query["type"] = filters["type"]
+ if query_type == "environment":
+ if env_name:
+ query['env_name'] = env_name
+ return query
+ return query
+
+ def get_environments_aggregates(self, query):
+ env_name = query['env_name']
+ aggregates = {
+ "type": query["type"],
+ "env_name": env_name,
+ "aggregates": {
+ "object_types": {
+
+ }
+ }
+ }
+ pipeline = [
+ {
+ '$match': {
+ 'environment': env_name
+ }
+ },
+ {
+ '$group': {
+ '_id': '$type',
+ 'total': {
+ '$sum': 1
+ }
+ }
+ }
+ ]
+ groups = self.aggregate(pipeline, "inventory")
+ for group in groups:
+ aggregates['aggregates']['object_types'][group['_id']] = \
+ group['total']
+ return aggregates
+
+ def get_messages_aggregates(self, query):
+ aggregates = {
+ "type": query['type'],
+ "aggregates": {
+ "levels": {},
+ "environments": {}
+ }
+ }
+ env_pipeline = [
+ {
+ '$group': {
+ '_id': '$environment',
+ 'total': {
+ '$sum': 1
+ }
+ }
+ }
+ ]
+ environments = self.aggregate(env_pipeline, "messages")
+ for environment in environments:
+ aggregates['aggregates']['environments'][environment['_id']] = \
+ environment['total']
+ level_pipeline = [
+ {
+ '$group': {
+ '_id': '$level',
+ 'total': {
+ '$sum': 1
+ }
+ }
+ }
+ ]
+ levels = self.aggregate(level_pipeline, "messages")
+ for level in levels:
+ aggregates['aggregates']['levels'][level['_id']] = \
+ level['total']
+
+ return aggregates
+
+ def get_constants_aggregates(self, query):
+ aggregates = {
+ "type": query['type'],
+ "aggregates": {
+ "names": {}
+ }
+ }
+ pipeline = [
+ {
+ '$project': {
+ '_id': 0,
+ 'name': 1,
+ 'total': {
+ '$size': '$data'
+ }
+ }
+ }
+ ]
+ constants = self.aggregate(pipeline, "constants")
+ for constant in constants:
+ aggregates['aggregates']['names'][constant['name']] = \
+ constant['total']
+
+ return aggregates
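
An illustrative query against this responder (host, port, environment name and token value are assumptions; the response shape follows get_environments_aggregates above):

    import requests

    token = "0123456789abcdef"                        # hypothetical token from /auth/tokens
    resp = requests.get("http://localhost:8000/aggregates",
                        params={"type": "environment", "env_name": "env1"},
                        headers={"X-AUTH-TOKEN": token})
    # -> {"type": "environment", "env_name": "env1",
    #     "aggregates": {"object_types": {"instance": 7, "vnic": 14, ...}}}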
diff --git a/app/api/responders/resource/clique_constraints.py b/app/api/responders/resource/clique_constraints.py
new file mode 100644
index 0000000..eddead9
--- /dev/null
+++ b/app/api/responders/resource/clique_constraints.py
@@ -0,0 +1,67 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.responders.responder_base import ResponderBase
+from api.validation.data_validate import DataValidate
+from bson.objectid import ObjectId
+
+
+class CliqueConstraints(ResponderBase):
+ def __init__(self):
+ super().__init__()
+ self.ID = '_id'
+ self.PROJECTION = {
+ self.ID: True
+ }
+ self.COLLECTION = 'clique_constraints'
+
+ def on_get(self, req, resp):
+ self.log.debug("Getting clique_constraints")
+ filters = self.parse_query_params(req)
+ focal_point_types = self.get_constants_by_name("object_types")
+ filters_requirements = {
+ 'id': self.require(ObjectId, True),
+ 'focal_point_type': self.require(str, False, DataValidate.LIST,
+ focal_point_types),
+ 'constraint': self.require([list, str]),
+ 'page': self.require(int, True),
+ 'page_size': self.require(int, True)
+ }
+ self.validate_query_data(filters, filters_requirements)
+ page, page_size = self.get_pagination(filters)
+ query = self.build_query(filters)
+ if self.ID in query:
+ clique_constraint = self.get_object_by_id(self.COLLECTION,
+ query,
+ [ObjectId], self.ID)
+ self.set_successful_response(resp, clique_constraint)
+ else:
+ clique_constraints_ids = self.get_objects_list(self.COLLECTION,
+ query,
+ page, page_size, self.PROJECTION)
+ self.set_successful_response(
+ resp, {"clique_constraints": clique_constraints_ids}
+ )
+
+ def build_query(self, filters):
+ query = {}
+ filters_keys = ['focal_point_type']
+ self.update_query_with_filters(filters, filters_keys, query)
+ constraints = filters.get('constraint')
+ if constraints:
+ if type(constraints) != list:
+ constraints = [constraints]
+
+ query['constraints'] = {
+ '$all': constraints
+ }
+ _id = filters.get('id')
+ if _id:
+ query[self.ID] = _id
+ return query
diff --git a/app/api/responders/resource/clique_types.py b/app/api/responders/resource/clique_types.py
new file mode 100644
index 0000000..9a39dc8
--- /dev/null
+++ b/app/api/responders/resource/clique_types.py
@@ -0,0 +1,103 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.responders.responder_base import ResponderBase
+from api.validation.data_validate import DataValidate
+from bson.objectid import ObjectId
+
+
+class CliqueTypes(ResponderBase):
+ def __init__(self):
+ super().__init__()
+ self.COLLECTION = "clique_types"
+ self.ID = "_id"
+ self.PROJECTION = {
+ self.ID: True,
+ "focal_point_type": True,
+ "link_types": True,
+ "environment": True
+ }
+
+ def on_get(self, req, resp):
+ self.log.debug("Getting clique types")
+
+ filters = self.parse_query_params(req)
+ focal_point_types = self.get_constants_by_name("object_types")
+ link_types = self.get_constants_by_name("link_types")
+ filters_requirements = {
+ 'env_name': self.require(str, mandatory=True),
+ 'id': self.require(ObjectId, True),
+ 'focal_point_type': self.require(str,
+ validate=DataValidate.LIST,
+ requirement=focal_point_types),
+ 'link_type': self.require([list, str],
+ validate=DataValidate.LIST,
+ requirement=link_types),
+ 'page': self.require(int, True),
+ 'page_size': self.require(int, True)
+ }
+
+ self.validate_query_data(filters, filters_requirements)
+ page, page_size = self.get_pagination(filters)
+ query = self.build_query(filters)
+ if self.ID in query:
+ clique_type = self.get_object_by_id(self.COLLECTION, query,
+ [ObjectId], self.ID)
+ self.set_successful_response(resp, clique_type)
+ else:
+ clique_types_ids = self.get_objects_list(self.COLLECTION,
+ query,
+ page, page_size, self.PROJECTION)
+ self.set_successful_response(resp,
+ {"clique_types": clique_types_ids})
+
+ def on_post(self, req, resp):
+ self.log.debug("Posting new clique_type")
+ error, clique_type = self.get_content_from_request(req)
+ if error:
+ self.bad_request(error)
+ focal_point_types = self.get_constants_by_name("object_types")
+ link_types = self.get_constants_by_name("link_types")
+ clique_type_requirements = {
+ 'environment': self.require(str, mandatory=True),
+ 'focal_point_type': self.require(str, False, DataValidate.LIST,
+ focal_point_types, True),
+ 'link_types': self.require(list, False, DataValidate.LIST,
+ link_types, True),
+ 'name': self.require(str, mandatory=True)
+ }
+
+ self.validate_query_data(clique_type, clique_type_requirements)
+
+ env_name = clique_type['environment']
+ if not self.check_environment_name(env_name):
+            self.bad_request("unknown environment: " + env_name)
+
+ self.write(clique_type, self.COLLECTION)
+ self.set_successful_response(resp,
+ {"message": "created a new clique_type "
+ "for environment {0}"
+ .format(env_name)},
+ "201")
+
+ def build_query(self, filters):
+ query = {}
+ filters_keys = ['focal_point_type']
+ self.update_query_with_filters(filters, filters_keys, query)
+ link_types = filters.get('link_type')
+ if link_types:
+ if type(link_types) != list:
+ link_types = [link_types]
+ query['link_types'] = {'$all': link_types}
+ _id = filters.get('id')
+ if _id:
+ query[self.ID] = _id
+
+ query['environment'] = filters['env_name']
+ return query
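
A hypothetical POST against this responder; the environment must already exist, and focal_point_type/link_types must come from the object_types and link_types constants (the specific values below are illustrative):

    import requests

    token = "0123456789abcdef"                        # hypothetical token from /auth/tokens
    clique_type = {"environment": "env1",
                   "name": "instance_clique",
                   "focal_point_type": "instance",
                   "link_types": ["instance-vnic", "vnic-vconnector"]}
    resp = requests.post("http://localhost:8000/clique_types",
                         json=clique_type,
                         headers={"X-AUTH-TOKEN": token})
    # 201 -> {"message": "created a new clique_type for environment env1"}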
diff --git a/app/api/responders/resource/cliques.py b/app/api/responders/resource/cliques.py
new file mode 100644
index 0000000..ece347a
--- /dev/null
+++ b/app/api/responders/resource/cliques.py
@@ -0,0 +1,73 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.validation.data_validate import DataValidate
+from api.responders.responder_base import ResponderBase
+from bson.objectid import ObjectId
+
+from utils.util import generate_object_ids
+
+
+class Cliques(ResponderBase):
+ def __init__(self):
+ super().__init__()
+ self.COLLECTION = "cliques"
+ self.ID = '_id'
+ self.PROJECTION = {
+ self.ID: True,
+ "focal_point_type": True,
+ "environment": True
+ }
+
+ def on_get(self, req, resp):
+ self.log.debug("Getting cliques")
+
+ filters = self.parse_query_params(req)
+ focal_point_types = self.get_constants_by_name("object_types")
+ link_types = self.get_constants_by_name("link_types")
+ filters_requirements = {
+ 'env_name': self.require(str, mandatory=True),
+ 'id': self.require(ObjectId, True),
+ 'focal_point': self.require(ObjectId, True),
+ 'focal_point_type': self.require(str, validate=DataValidate.LIST,
+ requirement=focal_point_types),
+ 'link_type': self.require(str, validate=DataValidate.LIST,
+ requirement=link_types),
+ 'link_id': self.require(ObjectId, True),
+ 'page': self.require(int, True),
+ 'page_size': self.require(int, True)
+ }
+ self.validate_query_data(filters, filters_requirements)
+ page, page_size = self.get_pagination(filters)
+ query = self.build_query(filters)
+
+ if self.ID in query:
+ clique = self.get_object_by_id(self.COLLECTION, query,
+ [ObjectId], self.ID)
+ self.set_successful_response(resp, clique)
+ else:
+ cliques_ids = self.get_objects_list(self.COLLECTION, query,
+ page, page_size, self.PROJECTION)
+ self.set_successful_response(resp, {"cliques": cliques_ids})
+
+ def build_query(self, filters):
+ query = {}
+ filters_keys = ['focal_point', 'focal_point_type']
+ self.update_query_with_filters(filters, filters_keys, query)
+ link_type = filters.get('link_type')
+ if link_type:
+ query['links_detailed.link_type'] = link_type
+ link_id = filters.get('link_id')
+ if link_id:
+ query['links_detailed._id'] = link_id
+ _id = filters.get('id')
+ if _id:
+ query[self.ID] = _id
+ query['environment'] = filters['env_name']
+ return query
diff --git a/app/api/responders/resource/constants.py b/app/api/responders/resource/constants.py
new file mode 100644
index 0000000..be71b5d
--- /dev/null
+++ b/app/api/responders/resource/constants.py
@@ -0,0 +1,30 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.responders.responder_base import ResponderBase
+from bson.objectid import ObjectId
+
+
+class Constants(ResponderBase):
+ def __init__(self):
+ super().__init__()
+ self.ID = '_id'
+ self.COLLECTION = 'constants'
+
+ def on_get(self, req, resp):
+ self.log.debug("Getting constants with name")
+ filters = self.parse_query_params(req)
+ filters_requirements = {
+ "name": self.require(str, mandatory=True),
+ }
+ self.validate_query_data(filters, filters_requirements)
+ query = {"name": filters['name']}
+ constant = self.get_object_by_id(self.COLLECTION, query,
+ [ObjectId], self.ID)
+ self.set_successful_response(resp, constant)
diff --git a/app/api/responders/resource/environment_configs.py b/app/api/responders/resource/environment_configs.py
new file mode 100644
index 0000000..bee6a4d
--- /dev/null
+++ b/app/api/responders/resource/environment_configs.py
@@ -0,0 +1,381 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.validation import regex
+from api.validation.data_validate import DataValidate
+from api.responders.responder_base import ResponderBase
+from bson.objectid import ObjectId
+from datetime import datetime
+from utils.constants import EnvironmentFeatures
+from utils.inventory_mgr import InventoryMgr
+
+
+class EnvironmentConfigs(ResponderBase):
+ def __init__(self):
+ super(EnvironmentConfigs, self).__init__()
+ self.inv = InventoryMgr()
+ self.ID = "name"
+ self.PROJECTION = {
+ self.ID: True,
+ "_id": False,
+ "name": True,
+ "distribution": True
+ }
+ self.COLLECTION = "environments_config"
+ self.CONFIGURATIONS_NAMES = ["mysql", "OpenStack",
+ "CLI", "AMQP", "Monitoring",
+ "NFV_provider", "ACI"]
+ self.OPTIONAL_CONFIGURATIONS_NAMES = ["AMQP", "Monitoring",
+ "NFV_provider", "ACI"]
+
+ self.provision_types = self.\
+ get_constants_by_name("environment_provision_types")
+ self.env_types = self.get_constants_by_name("env_types")
+ self.monitoring_types = self.\
+ get_constants_by_name("environment_monitoring_types")
+ self.distributions = self.\
+ get_constants_by_name("distributions")
+ self.mechanism_drivers = self.\
+ get_constants_by_name("mechanism_drivers")
+ self.operational_values = self.\
+ get_constants_by_name("environment_operational_status")
+ self.type_drivers = self.\
+ get_constants_by_name("type_drivers")
+
+ self.CONFIGURATIONS_REQUIREMENTS = {
+ "mysql": {
+ "name": self.require(str, mandatory=True),
+ "host": self.require(str,
+ validate=DataValidate.REGEX,
+ requirement=[regex.IP, regex.HOSTNAME],
+ mandatory=True),
+ "password": self.require(str, mandatory=True),
+ "port": self.require(int,
+ True,
+ DataValidate.REGEX,
+ regex.PORT,
+ mandatory=True),
+ "user": self.require(str, mandatory=True)
+ },
+ "OpenStack": {
+ "name": self.require(str, mandatory=True),
+ "admin_token": self.require(str, mandatory=True),
+ "host": self.require(str,
+ validate=DataValidate.REGEX,
+ requirement=[regex.IP, regex.HOSTNAME],
+ mandatory=True),
+ "port": self.require(int,
+ True,
+ validate=DataValidate.REGEX,
+ requirement=regex.PORT,
+ mandatory=True),
+ "pwd": self.require(str, mandatory=True),
+ "user": self.require(str, mandatory=True)
+ },
+ "CLI": {
+ "name": self.require(str, mandatory=True),
+ "host": self.require(str,
+ validate=DataValidate.REGEX,
+ requirement=[regex.IP, regex.HOSTNAME],
+ mandatory=True),
+ "user": self.require(str, mandatory=True),
+ "pwd": self.require(str),
+ "key": self.require(str,
+ validate=DataValidate.REGEX,
+ requirement=regex.PATH)
+ },
+ "AMQP": {
+ "name": self.require(str, mandatory=True),
+ "host": self.require(str,
+ validate=DataValidate.REGEX,
+ requirement=[regex.IP, regex.HOSTNAME],
+ mandatory=True),
+ "password": self.require(str, mandatory=True),
+ "port": self.require(int,
+ True,
+ validate=DataValidate.REGEX,
+ requirement=regex.PORT,
+ mandatory=True),
+ "user": self.require(str, mandatory=True)
+ },
+ "Monitoring": {
+ "name": self.require(str, mandatory=True),
+ "config_folder": self.require(str,
+ validate=DataValidate.REGEX,
+ requirement=regex.PATH,
+ mandatory=True),
+ "provision": self.require(str,
+ validate=DataValidate.LIST,
+ requirement=self.provision_types,
+ mandatory=True),
+ "env_type": self.require(str,
+ validate=DataValidate.LIST,
+ requirement=self.env_types,
+ mandatory=True),
+ "api_port": self.require(int, True, mandatory=True),
+ "rabbitmq_pass": self.require(str, mandatory=True),
+ "rabbitmq_user": self.require(str, mandatory=True),
+ "rabbitmq_port": self.require(int,
+ True,
+ validate=DataValidate.REGEX,
+ requirement=regex.PORT,
+ mandatory=True),
+ "ssh_port": self.require(int,
+ True,
+ validate=DataValidate.REGEX,
+ requirement=regex.PORT),
+ "ssh_user": self.require(str),
+ "ssh_password": self.require(str),
+ "server_ip": self.require(str,
+ validate=DataValidate.REGEX,
+ requirement=[regex.IP, regex.HOSTNAME],
+ mandatory=True),
+ "server_name": self.require(str, mandatory=True),
+ "type": self.require(str,
+ validate=DataValidate.LIST,
+ requirement=self.monitoring_types,
+ mandatory=True)
+ },
+ "NFV_provider": {
+ "name": self.require(str, mandatory=True),
+ "host": self.require(str,
+ validate=DataValidate.REGEX,
+ requirement=[regex.IP, regex.HOSTNAME],
+ mandatory=True),
+ "nfv_token": self.require(str, mandatory=True),
+ "port": self.require(int,
+ True,
+ DataValidate.REGEX,
+ regex.PORT,
+ True),
+ "user": self.require(str, mandatory=True),
+ "pwd": self.require(str, mandatory=True)
+ },
+ "ACI": {
+ "name": self.require(str, mandatory=True),
+ "host": self.require(str,
+ validate=DataValidate.REGEX,
+ requirement=[regex.IP, regex.HOSTNAME],
+ mandatory=True),
+ "user": self.require(str, mandatory=True),
+ "pwd": self.require(str, mandatory=True)
+ }
+ }
+ self.AUTH_REQUIREMENTS = {
+ "view-env": self.require(list, mandatory=True),
+ "edit-env": self.require(list, mandatory=True)
+ }
+
+ def on_get(self, req, resp):
+ self.log.debug("Getting environment config")
+ filters = self.parse_query_params(req)
+
+ filters_requirements = {
+ "name": self.require(str),
+ "distribution": self.require(str, False,
+ DataValidate.LIST,
+ self.distributions),
+ "mechanism_drivers": self.require([str, list],
+ False,
+ DataValidate.LIST,
+ self.mechanism_drivers),
+ "type_drivers": self.require(str, False,
+ DataValidate.LIST,
+ self.type_drivers),
+ "user": self.require(str),
+ "listen": self.require(bool, True),
+ "scanned": self.require(bool, True),
+ "monitoring_setup_done": self.require(bool, True),
+ "operational": self.require(str, False,
+ DataValidate.LIST,
+ self.operational_values),
+ "page": self.require(int, True),
+ "page_size": self.require(int, True)
+ }
+
+ self.validate_query_data(filters, filters_requirements)
+ page, page_size = self.get_pagination(filters)
+
+ query = self.build_query(filters)
+
+ if self.ID in query:
+ environment_config = self.get_object_by_id(self.COLLECTION, query,
+ [ObjectId, datetime], self.ID)
+ self.set_successful_response(resp, environment_config)
+ else:
+ objects_ids = self.get_objects_list(self.COLLECTION, query,
+ page, page_size, self.PROJECTION)
+ self.set_successful_response(resp, {'environment_configs': objects_ids})
+
+ def build_query(self, filters):
+ query = {}
+ filters_keys = ["name", "distribution", "type_drivers", "user",
+ "listen", "monitoring_setup_done", "scanned",
+ "operational"]
+ self.update_query_with_filters(filters, filters_keys, query)
+ mechanism_drivers = filters.get("mechanism_drivers")
+ if mechanism_drivers:
+            if not isinstance(mechanism_drivers, list):
+ mechanism_drivers = [mechanism_drivers]
+ query['mechanism_drivers'] = {'$all': mechanism_drivers}
+
+ return query
+
+ def on_post(self, req, resp):
+ self.log.debug("Creating a new environment config")
+
+ error, env_config = self.get_content_from_request(req)
+ if error:
+ self.bad_request(error)
+
+ environment_config_requirement = {
+ "app_path": self.require(str, mandatory=True),
+ "configuration": self.require(list, mandatory=True),
+ "distribution": self.require(str, False, DataValidate.LIST,
+ self.distributions, True),
+ "listen": self.require(bool, True, mandatory=True),
+ "user": self.require(str),
+ "mechanism_drivers": self.require(list, False, DataValidate.LIST,
+ self.mechanism_drivers, True),
+ "name": self.require(str, mandatory=True),
+ "operational": self.require(str, True, DataValidate.LIST,
+ self.operational_values, mandatory=True),
+ "scanned": self.require(bool, True),
+ "last_scanned": self.require(str),
+ "type": self.require(str, mandatory=True),
+ "type_drivers": self.require(str, False, DataValidate.LIST,
+ self.type_drivers, True),
+ "enable_monitoring": self.require(bool, True),
+ "monitoring_setup_done": self.require(bool, True),
+ "auth": self.require(dict)
+ }
+ self.validate_query_data(env_config,
+ environment_config_requirement,
+ can_be_empty_keys=["last_scanned"]
+ )
+ self.check_and_convert_datetime("last_scanned", env_config)
+ # validate the configurations
+ configurations = env_config['configuration']
+ config_validation = self.validate_environment_config(configurations)
+
+ if not config_validation['passed']:
+ self.bad_request(config_validation['error_message'])
+
+ err_msg = self.validate_env_config_with_supported_envs(env_config)
+ if err_msg:
+ self.bad_request(err_msg)
+
+ err_msg = self.validate_env_config_with_constraints(env_config)
+ if err_msg:
+ self.bad_request(err_msg)
+
+ if "auth" in env_config:
+ err_msg = self.validate_data(env_config.get("auth"),
+ self.AUTH_REQUIREMENTS)
+ if err_msg:
+ self.bad_request("auth error: " + err_msg)
+
+ if "scanned" not in env_config:
+ env_config["scanned"] = False
+
+ self.write(env_config, self.COLLECTION)
+ self.set_successful_response(resp,
+ {"message": "created environment_config "
+ "for {0}"
+ .format(env_config["name"])},
+ "201")
+
+ def validate_environment_config(self, configurations):
+ configurations_of_names = {}
+ validation = {"passed": True}
+ if [config for config in configurations
+ if 'name' not in config]:
+ validation['passed'] = False
+ validation['error_message'] = "configuration must have name"
+ return validation
+
+ unknown_configs = [config['name'] for config in configurations
+ if config['name'] not in self.CONFIGURATIONS_NAMES]
+ if unknown_configs:
+ validation['passed'] = False
+ validation['error_message'] = 'Unknown configurations: {0}'. \
+ format(' and '.join(unknown_configs))
+ return validation
+
+ for name in self.CONFIGURATIONS_NAMES:
+ configs = self.get_configuration_by_name(name, configurations)
+ if configs:
+ if len(configs) > 1:
+ validation["passed"] = False
+ validation["error_message"] = "environment configurations can " \
+ "only contain one " \
+ "configuration for {0}".format(name)
+ return validation
+ configurations_of_names[name] = configs[0]
+ else:
+ if name not in self.OPTIONAL_CONFIGURATIONS_NAMES:
+ validation["passed"] = False
+ validation['error_message'] = "configuration for {0} " \
+ "is mandatory".format(name)
+ return validation
+
+ for name, config in configurations_of_names.items():
+ error_message = self.validate_configuration(name, config)
+ if error_message:
+ validation['passed'] = False
+ validation['error_message'] = "{0} error: {1}".\
+ format(name, error_message)
+ break
+            if name == 'CLI':
+ if 'key' not in config and 'pwd' not in config:
+ validation['passed'] = False
+ validation['error_message'] = 'CLI error: either key ' \
+ 'or pwd must be provided'
+ return validation
+
+ def validate_env_config_with_supported_envs(self, env_config):
+ # validate the environment config with supported environments
+ matches = {
+ 'environment.distribution': env_config['distribution'],
+ 'environment.type_drivers': env_config['type_drivers'],
+ 'environment.mechanism_drivers': {'$in': env_config['mechanism_drivers']}
+ }
+
+ err_prefix = 'configuration not accepted: '
+ if not self.inv.is_feature_supported_in_env(matches,
+ EnvironmentFeatures.SCANNING):
+ return err_prefix + 'scanning is not supported in this environment'
+
+ configs = env_config['configuration']
+ if not self.inv.is_feature_supported_in_env(matches,
+ EnvironmentFeatures.MONITORING) \
+ and self.get_configuration_by_name('Monitoring', configs):
+ return err_prefix + 'monitoring is not supported in this environment, ' \
+ 'please remove the Monitoring configuration'
+
+ if not self.inv.is_feature_supported_in_env(matches,
+ EnvironmentFeatures.LISTENING) \
+ and self.get_configuration_by_name('AMQP', configs):
+ return err_prefix + 'listening is not supported in this environment, ' \
+ 'please remove the AMQP configuration'
+
+ return None
+
+ def validate_env_config_with_constraints(self, env_config):
+ if env_config['listen'] and \
+ not self.get_configuration_by_name('AMQP', env_config['configuration']):
+ return 'configuration not accepted: ' \
+                   'must provide AMQP configuration to listen to the environment'
+
+ def get_configuration_by_name(self, name, configurations):
+ return [config for config in configurations if config['name'] == name]
+
+ def validate_configuration(self, name, configuration):
+ return self.validate_data(configuration,
+ self.CONFIGURATIONS_REQUIREMENTS[name])
diff --git a/app/api/responders/resource/inventory.py b/app/api/responders/resource/inventory.py
new file mode 100644
index 0000000..02bc486
--- /dev/null
+++ b/app/api/responders/resource/inventory.py
@@ -0,0 +1,65 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.responders.responder_base import ResponderBase
+from bson.objectid import ObjectId
+from datetime import datetime
+
+
+class Inventory(ResponderBase):
+ def __init__(self):
+ super().__init__()
+ self.COLLECTION = 'inventory'
+ self.ID = 'id'
+ self.PROJECTION = {
+ self.ID: True,
+ "name": True,
+ "name_path": True
+ }
+
+ def on_get(self, req, resp):
+ self.log.debug("Getting objects from inventory")
+
+ filters = self.parse_query_params(req)
+ filters_requirements = {
+ 'env_name': self.require(str, mandatory=True),
+ 'id': self.require(str),
+ 'id_path': self.require(str),
+ 'parent_id': self.require(str),
+ 'parent_path': self.require(str),
+ 'sub_tree': self.require(bool, True),
+ 'page': self.require(int, True),
+ 'page_size': self.require(int, True)
+ }
+ self.validate_query_data(filters, filters_requirements)
+ page, page_size = self.get_pagination(filters)
+ query = self.build_query(filters)
+ if self.ID in query:
+ obj = self.get_object_by_id(self.COLLECTION, query,
+ [ObjectId, datetime], self.ID)
+ self.set_successful_response(resp, obj)
+ else:
+ objects_ids = self.get_objects_list(self.COLLECTION, query,
+ page, page_size, self.PROJECTION)
+ self.set_successful_response(resp, {"objects": objects_ids})
+
+ def build_query(self, filters):
+ query = {}
+ filters_keys = ['parent_id', 'id_path', 'id']
+ self.update_query_with_filters(filters, filters_keys, query)
+ parent_path = filters.get('parent_path')
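+        # the id_path regex below matches direct children of parent_path, or the
+        # whole subtree when sub_tree is set (the pattern is unanchored, so any
+        # id_path containing parent_path matches)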
+ if parent_path:
+ regular_expression = parent_path
+ if filters.get('sub_tree', False):
+ regular_expression += "[/]?"
+ else:
+ regular_expression += "/[^/]+$"
+ query['id_path'] = {"$regex": regular_expression}
+ query['environment'] = filters['env_name']
+ return query
diff --git a/app/api/responders/resource/links.py b/app/api/responders/resource/links.py
new file mode 100644
index 0000000..33fd432
--- /dev/null
+++ b/app/api/responders/resource/links.py
@@ -0,0 +1,76 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.responders.responder_base import ResponderBase
+from api.validation.data_validate import DataValidate
+from bson.objectid import ObjectId
+
+
+class Links(ResponderBase):
+ def __init__(self):
+ super().__init__()
+ self.COLLECTION = 'links'
+ self.ID = '_id'
+ self.PROJECTION = {
+ self.ID: True,
+ "link_name": True,
+ "link_type": True,
+ "environment": True,
+ "host": True
+ }
+
+ def on_get(self, req, resp):
+ self.log.debug("Getting links from links")
+
+ filters = self.parse_query_params(req)
+
+ link_types = self.get_constants_by_name("link_types")
+ link_states = self.get_constants_by_name("link_states")
+ filters_requirements = {
+ 'env_name': self.require(str, mandatory=True),
+ 'id': self.require(ObjectId, True),
+ 'host': self.require(str),
+ 'link_type': self.require(str, validate=DataValidate.LIST,
+ requirement=link_types),
+ 'link_name': self.require(str),
+ 'source_id': self.require(str),
+ 'target_id': self.require(str),
+ 'state': self.require(str, validate=DataValidate.LIST,
+ requirement=link_states),
+ 'page': self.require(int, True),
+ 'page_size': self.require(int, True)
+ }
+
+ self.validate_query_data(filters, filters_requirements, r'^attributes\:\w+$')
+ filters = self.change_dict_naming_convention(filters, self.replace_colon_with_dot)
+ page, page_size = self.get_pagination(filters)
+ query = self.build_query(filters)
+ if self.ID in query:
+ link = self.get_object_by_id(self.COLLECTION, query,
+ [ObjectId], self.ID)
+ self.set_successful_response(resp, link)
+ else:
+ links_ids = self.get_objects_list(self.COLLECTION, query,
+ page, page_size, self.PROJECTION)
+ self.set_successful_response(resp, {"links": links_ids})
+
+ def build_query(self, filters):
+ query = {}
+ filters_keys = ['host', 'link_type', 'link_name',
+ 'source_id', 'target_id', 'state']
+ self.update_query_with_filters(filters, filters_keys, query)
+ # add attributes to the query
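+        # (query string keys of the form 'attributes:<name>' are validated in
+        #  on_get and converted to 'attributes.<name>'; e.g. a hypothetical
+        #  ?attributes:network_type=vxlan becomes {'attributes.network_type': 'vxlan'})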
+ for key in filters.keys():
+ if key.startswith("attributes."):
+ query[key] = filters[key]
+ _id = filters.get('id')
+ if _id:
+ query[self.ID] = _id
+ query['environment'] = filters['env_name']
+ return query
diff --git a/app/api/responders/resource/messages.py b/app/api/responders/resource/messages.py
new file mode 100644
index 0000000..0dda31b
--- /dev/null
+++ b/app/api/responders/resource/messages.py
@@ -0,0 +1,78 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from datetime import datetime
+
+from api.responders.responder_base import ResponderBase
+from api.validation.data_validate import DataValidate
+from bson.objectid import ObjectId
+
+
+class Messages(ResponderBase):
+ def __init__(self):
+ super().__init__()
+ self.ID = "id"
+ self.COLLECTION = 'messages'
+ self.PROJECTION = {
+ self.ID: True,
+ "environment": True,
+ "source_system": True,
+ "level": True
+ }
+
+ def on_get(self, req, resp):
+ self.log.debug("Getting messages from messages")
+ filters = self.parse_query_params(req)
+ messages_severity = self.get_constants_by_name("messages_severity")
+ object_types = self.get_constants_by_name("object_types")
+ filters_requirements = {
+ 'env_name': self.require(str, mandatory=True),
+ 'source_system': self.require(str),
+ 'id': self.require(str),
+ 'level': self.require(str, validate=DataValidate.LIST,
+ requirement=messages_severity),
+ 'related_object': self.require(str),
+ 'related_object_type': self.require(str, validate=DataValidate.LIST,
+ requirement=object_types),
+ 'start_time': self.require(str),
+ 'end_time': self.require(str),
+ 'page': self.require(int, True),
+ 'page_size': self.require(int, True)
+ }
+ self.validate_query_data(filters, filters_requirements)
+ page, page_size = self.get_pagination(filters)
+ self.check_and_convert_datetime('start_time', filters)
+ self.check_and_convert_datetime('end_time', filters)
+
+ query = self.build_query(filters)
+ if self.ID in query:
+ message = self.get_object_by_id(self.COLLECTION, query,
+ [ObjectId, datetime], self.ID)
+ self.set_successful_response(resp, message)
+ else:
+ objects_ids = self.get_objects_list(self.COLLECTION, query,
+ page, page_size, self.PROJECTION)
+ self.set_successful_response(resp, {'messages': objects_ids})
+
+ def build_query(self, filters):
+ query = {}
+ filters_keys = ['source_system', 'id', 'level', 'related_object',
+ 'related_object_type']
+ self.update_query_with_filters(filters, filters_keys, query)
+ start_time = filters.get('start_time')
+ if start_time:
+ query['timestamp'] = {"$gte": start_time}
+ end_time = filters.get('end_time')
+ if end_time:
+ if 'timestamp' in query:
+ query['timestamp'].update({"$lte": end_time})
+ else:
+ query['timestamp'] = {"$lte": end_time}
+ query['environment'] = filters['env_name']
+ return query
diff --git a/app/api/responders/resource/monitoring_config_templates.py b/app/api/responders/resource/monitoring_config_templates.py
new file mode 100644
index 0000000..42d3973
--- /dev/null
+++ b/app/api/responders/resource/monitoring_config_templates.py
@@ -0,0 +1,65 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.validation.data_validate import DataValidate
+from api.responders.responder_base import ResponderBase
+from bson.objectid import ObjectId
+
+
+class MonitoringConfigTemplates(ResponderBase):
+ def __init__(self):
+ super().__init__()
+ self.ID = "_id"
+ self.COLLECTION = "monitoring_config_templates"
+ self.PROJECTION = {
+ self.ID: True,
+ "side": True,
+ "type": True
+ }
+
+ def on_get(self, req, resp):
+ self.log.debug("Getting monitoring config template")
+
+ filters = self.parse_query_params(req)
+
+ sides = self.get_constants_by_name("monitoring_sides")
+ filters_requirements = {
+ "id": self.require(ObjectId, True),
+ "order": self.require(int, True),
+ "side": self.require(str, validate=DataValidate.LIST,
+ requirement=sides),
+ "type": self.require(str),
+ "page": self.require(int, True),
+ "page_size": self.require(int, True)
+ }
+
+ self.validate_query_data(filters, filters_requirements)
+
+ page, page_size = self.get_pagination(filters)
+ query = self.build_query(filters)
+ if self.ID in query:
+ template = self.get_object_by_id(self.COLLECTION, query,
+ [ObjectId], self.ID)
+ self.set_successful_response(resp, template)
+ else:
+ templates = self.get_objects_list(self.COLLECTION, query,
+ page, page_size, self.PROJECTION)
+ self.set_successful_response(
+ resp,
+ {"monitoring_config_templates": templates}
+ )
+
+ def build_query(self, filters):
+ query = {}
+ filters_keys = ["order", "side", "type"]
+ self.update_query_with_filters(filters, filters_keys, query)
+ _id = filters.get('id')
+ if _id:
+ query[self.ID] = _id
+ return query
diff --git a/app/api/responders/resource/scans.py b/app/api/responders/resource/scans.py
new file mode 100644
index 0000000..c9ad2e2
--- /dev/null
+++ b/app/api/responders/resource/scans.py
@@ -0,0 +1,111 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.validation.data_validate import DataValidate
+from api.responders.responder_base import ResponderBase
+from bson.objectid import ObjectId
+from datetime import datetime
+
+
+class Scans(ResponderBase):
+ def __init__(self):
+ super().__init__()
+ self.COLLECTION = "scans"
+ self.ID = "_id"
+ self.PROJECTION = {
+ self.ID: True,
+ "environment": True,
+ "status": True,
+ "scan_completed": True
+ }
+
+ def on_get(self, req, resp):
+ self.log.debug("Getting scans")
+ filters = self.parse_query_params(req)
+
+ scan_statuses = self.get_constants_by_name("scan_statuses")
+ filters_requirements = {
+ "env_name": self.require(str, mandatory=True),
+ "id": self.require(ObjectId, True),
+ "base_object": self.require(str),
+ "status": self.require(str, False, DataValidate.LIST, scan_statuses),
+ "page": self.require(int, True),
+ "page_size": self.require(int, True)
+ }
+
+ self.validate_query_data(filters, filters_requirements)
+ page, page_size = self.get_pagination(filters)
+
+ query = self.build_query(filters)
+ if "_id" in query:
+ scan = self.get_object_by_id(self.COLLECTION, query,
+ [ObjectId, datetime], self.ID)
+ self.set_successful_response(resp, scan)
+ else:
+ scans_ids = self.get_objects_list(self.COLLECTION, query,
+ page, page_size, self.PROJECTION)
+ self.set_successful_response(resp, {"scans": scans_ids})
+
+ def on_post(self, req, resp):
+ self.log.debug("Posting new scan")
+ error, scan = self.get_content_from_request(req)
+ if error:
+ self.bad_request(error)
+
+ scan_statuses = self.get_constants_by_name("scan_statuses")
+ log_levels = self.get_constants_by_name("log_levels")
+
+ scan_requirements = {
+ "status": self.require(str,
+ validate=DataValidate.LIST,
+ requirement=scan_statuses,
+ mandatory=True),
+ "log_level": self.require(str,
+ validate=DataValidate.LIST,
+ requirement=log_levels),
+ "clear": self.require(bool, True),
+ "scan_only_inventory": self.require(bool, True),
+ "scan_only_links": self.require(bool, True),
+ "scan_only_cliques": self.require(bool, True),
+ "environment": self.require(str, mandatory=True),
+ "inventory": self.require(str),
+ "object_id": self.require(str)
+ }
+ self.validate_query_data(scan, scan_requirements)
+ scan_only_keys = [k for k in scan if k.startswith("scan_only_")]
+ if len(scan_only_keys) > 1:
+ self.bad_request("multiple scan_only_* flags found: {0}. "
+ "only one of them can be set."
+ .format(", ".join(scan_only_keys)))
+
+ env_name = scan["environment"]
+ if not self.check_environment_name(env_name):
+ self.bad_request("unkown environment: " + env_name)
+
+ scan["scan_completed"] = False
+ scan["submit_timestamp"] = datetime.now()
+ self.write(scan, self.COLLECTION)
+ self.set_successful_response(resp,
+ {"message": "created a new scan for "
+ "environment {0}"
+ .format(env_name)},
+ "201")
+
+ def build_query(self, filters):
+ query = {}
+ filters_keys = ["status"]
+ self.update_query_with_filters(filters, filters_keys, query)
+ base_object = filters.get("base_object")
+ if base_object:
+ query['object_id'] = base_object
+ _id = filters.get("id")
+ if _id:
+ query['_id'] = _id
+ query['environment'] = filters['env_name']
+ return query
diff --git a/app/api/responders/resource/scheduled_scans.py b/app/api/responders/resource/scheduled_scans.py
new file mode 100644
index 0000000..0588cd0
--- /dev/null
+++ b/app/api/responders/resource/scheduled_scans.py
@@ -0,0 +1,113 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.validation.data_validate import DataValidate
+from api.responders.responder_base import ResponderBase
+from bson.objectid import ObjectId
+from datetime import datetime
+
+
+class ScheduledScans(ResponderBase):
+ def __init__(self):
+ super().__init__()
+ self.COLLECTION = "scheduled_scans"
+ self.ID = "_id"
+ self.PROJECTION = {
+ self.ID: True,
+ "environment": True,
+ "scheduled_timestamp": True,
+ "freq": True
+ }
+ self.SCAN_FREQ = [
+ "YEARLY",
+ "MONTHLY",
+ "WEEKLY",
+ "DAILY",
+ "HOURLY"
+ ]
+
+ def on_get(self, req, resp):
+ self.log.debug("Getting scheduled scans")
+ filters = self.parse_query_params(req)
+
+ filters_requirements = {
+ "environment": self.require(str, mandatory=True),
+ "id": self.require(ObjectId, True),
+ "freq": self.require(str, False,
+ DataValidate.LIST, self.SCAN_FREQ),
+ "page": self.require(int, True),
+ "page_size": self.require(int, True)
+ }
+
+ self.validate_query_data(filters, filters_requirements)
+ page, page_size = self.get_pagination(filters)
+
+ query = self.build_query(filters)
+ if self.ID in query:
+ scheduled_scan = self.get_object_by_id(self.COLLECTION, query,
+ [ObjectId, datetime],
+ self.ID)
+ self.set_successful_response(resp, scheduled_scan)
+ else:
+ scheduled_scan_ids = self.get_objects_list(self.COLLECTION, query,
+ page, page_size,
+ self.PROJECTION,
+ [datetime])
+ self.set_successful_response(resp,
+ {"scheduled_scans": scheduled_scan_ids})
+
+ def on_post(self, req, resp):
+ self.log.debug("Posting new scheduled scan")
+ error, scheduled_scan = self.get_content_from_request(req)
+ if error:
+ self.bad_request(error)
+
+ log_levels = self.get_constants_by_name("log_levels")
+ scheduled_scan_requirements = {
+ "environment": self.require(str, mandatory=True),
+ "scan_only_links": self.require(bool, True),
+ "scan_only_cliques": self.require(bool, True),
+ "scan_only_inventory": self.require(bool, True),
+ "freq": self.require(str, validate=DataValidate.LIST,
+ requirement=self.SCAN_FREQ,
+ mandatory=True),
+ "log_level": self.require(str,
+ validate=DataValidate.LIST,
+ requirement=log_levels),
+ "clear": self.require(bool, True),
+ "submit_timestamp": self.require(str, mandatory=True)
+ }
+ self.validate_query_data(scheduled_scan, scheduled_scan_requirements)
+ self.check_and_convert_datetime("submit_timestamp", scheduled_scan)
+ scan_only_keys = [k for k in scheduled_scan if k.startswith("scan_only_")]
+ if len(scan_only_keys) > 1:
+ self.bad_request("multiple scan_only_* flags found: {0}. "
+ "only one of them can be set."
+ .format(", ".join(scan_only_keys)))
+
+ env_name = scheduled_scan["environment"]
+ if not self.check_environment_name(env_name):
+ self.bad_request("unkown environment: " + env_name)
+
+ self.write(scheduled_scan, self.COLLECTION)
+ self.set_successful_response(resp,
+ {"message": "created a new scheduled scan for "
+ "environment {0}"
+ .format(env_name)},
+ "201")
+
+ def build_query(self, filters):
+ query = {}
+ filters_keys = ["freq", "environment"]
+ self.update_query_with_filters(filters, filters_keys, query)
+
+ _id = filters.get("id")
+ if _id:
+ query["_id"] = _id
+ return query
diff --git a/app/api/responders/responder_base.py b/app/api/responders/responder_base.py
new file mode 100644
index 0000000..479a897
--- /dev/null
+++ b/app/api/responders/responder_base.py
@@ -0,0 +1,223 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+import re
+from urllib import parse
+
+from dateutil import parser
+from pymongo import errors
+
+from api.exceptions import exceptions
+from api.validation.data_validate import DataValidate
+from utils.dict_naming_converter import DictNamingConverter
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+from utils.string_utils import jsonify, stringify_object_values_by_types
+
+
+class ResponderBase(DataValidate, DictNamingConverter):
+ UNCHANGED_COLLECTIONS = ["monitoring_config_templates",
+ "environments_config",
+ "messages",
+ "scheduled_scans"]
+
+ def __init__(self):
+ super().__init__()
+ self.log = FullLogger()
+ self.inv = InventoryMgr()
+
+ def set_successful_response(self, resp, body="", status="200"):
+ if not isinstance(body, str):
+ try:
+ body = jsonify(body)
+ except Exception as e:
+ self.log.exception(e)
+ raise ValueError("The response body should be a string")
+ resp.status = status
+ resp.body = body
+
+ def set_error_response(self, title="", code="", message="", body=""):
+ if body:
+ raise exceptions.CalipsoApiException(code, body, message)
+ body = {
+ "error": {
+ "message": message,
+ "code": code,
+ "title": title
+ }
+ }
+ body = jsonify(body)
+ raise exceptions.CalipsoApiException(code, body, message)
+
+ def not_found(self, message="Requested resource not found"):
+ self.set_error_response("Not Found", "404", message)
+
+ def conflict(self,
+ message="The posted data conflicts with the existing data"):
+ self.set_error_response("Conflict", "409", message)
+
+ def bad_request(self, message="Invalid request content"):
+ self.set_error_response("Bad Request", "400", message)
+
+ def unauthorized(self, message="Request requires authorization"):
+ self.set_error_response("Unauthorized", "401", message)
+
+ def validate_query_data(self, data, data_requirements,
+ additional_key_reg=None,
+ can_be_empty_keys=[]):
+ error_message = self.validate_data(data, data_requirements,
+ additional_key_reg,
+ can_be_empty_keys)
+ if error_message:
+ self.bad_request(error_message)
+
+ def check_and_convert_datetime(self, time_key, data):
+ time = data.get(time_key)
+
+ if time:
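+            # a '+' in the ISO 8601 UTC offset may have been URL-decoded into a
+            # space by the query string parser, so restore it before parsing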
+ time = time.replace(' ', '+')
+ try:
+ data[time_key] = parser.parse(time)
+ except Exception:
+ self.bad_request("{0} must follow ISO 8610 date and time format,"
+ "YYYY-MM-DDThh:mm:ss.sss+hhmm".format(time_key))
+
+ def check_environment_name(self, env_name):
+ query = {"name": env_name}
+ objects = self.read("environments_config", query)
+ if not objects:
+ return False
+ return True
+
+ def get_object_by_id(self, collection, query, stringify_types, id):
+ objs = self.read(collection, query)
+ if not objs:
+ env_name = query.get("environment")
+ if env_name and \
+ not self.check_environment_name(env_name):
+ self.bad_request("unkown environment: " + env_name)
+ self.not_found()
+ obj = objs[0]
+ stringify_object_values_by_types(obj, stringify_types)
+        if id == "_id":
+ obj['id'] = obj.get('_id')
+ return obj
+
+ def get_objects_list(self, collection, query, page, page_size,
+ projection, stringify_types=None):
+ objects = self.read(collection, query, projection, page, page_size)
+ if not objects:
+ env_name = query.get("environment")
+ if env_name and \
+ not self.check_environment_name(env_name):
+ self.bad_request("unkown environment: " + env_name)
+ self.not_found()
+ for obj in objects:
+ if "id" not in obj and "_id" in obj:
+ obj["id"] = str(obj["_id"])
+ if "_id" in obj:
+ del obj["_id"]
+ if stringify_types:
+ stringify_object_values_by_types(objects, stringify_types)
+
+ return objects
+
+ def parse_query_params(self, req):
+ query_string = req.query_string
+ if not query_string:
+ return {}
+ try:
+ query_params = dict((k, v if len(v) > 1 else v[0])
+ for k, v in
+ parse.parse_qs(query_string,
+ keep_blank_values=True,
+ strict_parsing=True).items())
+ return query_params
+ except ValueError as e:
+ self.bad_request(str("Invalid query string: {0}".format(str(e))))
+
+ def replace_colon_with_dot(self, s):
+ return s.replace(':', '.')
+
+ def get_pagination(self, filters):
+ page_size = filters.get('page_size', 1000)
+ page = filters.get('page', 0)
+ return page, page_size
+
+ def update_query_with_filters(self, filters, filters_keys, query):
+ for filter_key in filters_keys:
+ filter = filters.get(filter_key)
+ if filter is not None:
+ query.update({filter_key: filter})
+
+ def get_content_from_request(self, req):
+ error = ""
+ content = ""
+ if not req.content_length:
+ error = "No data found in the request body"
+ return error, content
+
+ data = req.stream.read()
+ content_string = data.decode()
+ try:
+ content = json.loads(content_string)
+ if not isinstance(content, dict):
+ error = "The data in the request body must be an object"
+ except Exception:
+ error = "The request can not be fulfilled due to bad syntax"
+
+ return error, content
+
+ def get_collection_by_name(self, name):
+ if name in self.UNCHANGED_COLLECTIONS:
+ return self.inv.db[name]
+ return self.inv.collections[name]
+
+ def get_constants_by_name(self, name):
+ constants = self.get_collection_by_name("constants").\
+ find_one({"name": name})
+ # consts = [d['value'] for d in constants['data']]
+ consts = []
+ if not constants:
+ self.log.error('constant type: ' + name +
+                           ': no constants exist')
+ return consts
+ for d in constants['data']:
+ try:
+ consts.append(d['value'])
+ except KeyError:
+ self.log.error('constant type: ' + name +
+ ': no "value" key for data: ' + str(d))
+ return consts
+
+ def read(self, collection, matches={}, projection=None, skip=0, limit=1000):
+ collection = self.get_collection_by_name(collection)
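+        # 'skip' is passed in as a page number; convert it to a document offset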
+ skip *= limit
+ query = collection.find(matches, projection).skip(skip).limit(limit)
+ return list(query)
+
+ def write(self, document, collection="inventory"):
+ try:
+ self.get_collection_by_name(collection).\
+ insert_one(document)
+ except errors.DuplicateKeyError as e:
+ self.conflict("The key value ({0}) already exists".
+ format(', '.
+ join(self.get_duplicate_key_values(e.details['errmsg']))))
+ except errors.WriteError as e:
+ self.bad_request('Failed to create resource for {0}'.format(str(e)))
+
+ def get_duplicate_key_values(self, err_msg):
+ return ["'{0}'".format(key) for key in re.findall(r'"([^",]+)"', err_msg)]
+
+ def aggregate(self, pipeline, collection):
+ collection = self.get_collection_by_name(collection)
+ data = collection.aggregate(pipeline)
+ return list(data)
diff --git a/app/api/server.py b/app/api/server.py
new file mode 100755
index 0000000..3fef46e
--- /dev/null
+++ b/app/api/server.py
@@ -0,0 +1,74 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import argparse
+
+from gunicorn.app.base import BaseApplication
+from gunicorn.six import iteritems
+
+from api.app import App
+
+
+# This class is used to integrate Gunicorn with falcon application
+class StandaloneApplication(BaseApplication):
+ def __init__(self, app, options=None):
+ self.options = options
+ self.application = app
+ super().__init__()
+
+ def load_config(self):
+ config = dict([(key, value) for key, value in iteritems(self.options)
+ if key in self.cfg.settings and value is not None])
+ for key, value in iteritems(config):
+ self.cfg.set(key.lower(), value)
+
+ def load(self):
+ return self.application
+
+
+def get_args():
+ parser = argparse.ArgumentParser(description="Parameters for Calipso API")
+ parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
+ default="",
+ help="name of config file with mongo access "
+ "details")
+ parser.add_argument("--ldap_config", nargs="?", type=str,
+ default="",
+ help="name of the config file with ldap server "
+ "config details")
+ parser.add_argument("-l", "--loglevel", nargs="?", type=str,
+ default="INFO",
+ help="logging level \n(default: 'INFO')")
+ parser.add_argument("-b", "--bind", nargs="?", type=str,
+ default="127.0.0.1:8000",
+ help="binding address of the API server\n"
+ "(default 127.0.0.1:8000)")
+ parser.add_argument("-y", "--inventory", nargs="?", type=str,
+ default="inventory",
+ help="name of inventory collection \n" +
+ "(default: 'inventory')")
+ parser.add_argument("-t", "--token-lifetime", nargs="?", type=int,
+ default=86400,
+ help="lifetime of the token")
+ args = parser.parse_args()
+ return args
+
+
+if __name__ == "__main__":
+ args = get_args()
+ # Gunicorn configuration
+ options = {
+ "bind": args.bind
+ }
+ app = App(args.mongo_config,
+ args.ldap_config,
+ args.loglevel,
+ args.inventory,
+ args.token_lifetime).get_app()
+ StandaloneApplication(app, options).run()
diff --git a/app/api/validation/__init__.py b/app/api/validation/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/validation/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/validation/data_validate.py b/app/api/validation/data_validate.py
new file mode 100644
index 0000000..6928c4b
--- /dev/null
+++ b/app/api/validation/data_validate.py
@@ -0,0 +1,185 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+
+from api.validation import regex
+
+
+class DataValidate:
+ LIST = "list"
+ REGEX = "regex"
+
+ def __init__(self):
+ super().__init__()
+ self.BOOL_CONVERSION = {
+ "true": True,
+ "1": True,
+ 1: True,
+ "false": False,
+ "0": False,
+ 0: False
+ }
+ self.TYPES_CUSTOMIZED_NAMES = {
+ 'str': 'string',
+ 'bool': 'boolean',
+ 'int': 'integer',
+ 'ObjectId': 'MongoDB ObjectId'
+ }
+ self.VALIDATE_SWITCHER = {
+ self.LIST: self.validate_value_in_list,
+ self.REGEX: regex.validate
+ }
+
+ def validate_type(self, obj, t, convert_to_type):
+ if convert_to_type:
+ # user may input different values for the
+ # boolean True or False, convert the number or
+ # the string to corresponding python bool values
+ if t == bool:
+ if isinstance(obj, str):
+ obj = obj.lower()
+ if obj in self.BOOL_CONVERSION:
+ return self.BOOL_CONVERSION[obj]
+ return None
+ try:
+ obj = t(obj)
+ except Exception:
+ return None
+ return obj
+ else:
+ return obj if isinstance(obj, t) else None
+
+ # get the requirement for validation
+ # this requirement object will be used in validate_data method
+ @staticmethod
+ def require(types, convert_to_type=False, validate=None,
+ requirement=None, mandatory=False, error_messages=None):
+ if error_messages is None:
+ error_messages = {}
+ return {
+ "types": types,
+ "convert_to_type": convert_to_type,
+ "validate": validate,
+ "requirement": requirement,
+ "mandatory": mandatory,
+ "error_messages": error_messages
+ }
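+
+    # Illustrative example (not from the original code): a requirement spec
+    # built with require() and later passed to validate_data(), e.g.
+    #   'port': DataValidate.require(int, convert_to_type=True,
+    #                                validate=DataValidate.REGEX,
+    #                                requirement=regex.PORT, mandatory=True)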
+
+ def validate_data(self, data, requirements,
+ additional_key_re=None,
+ can_be_empty_keys=[]):
+
+ illegal_keys = [key for key in data.keys()
+ if key not in requirements.keys()]
+
+ if additional_key_re:
+ illegal_keys = [key for key in illegal_keys
+ if not re.match(additional_key_re, key)]
+
+ if illegal_keys:
+ return 'Invalid key(s): {0}'.format(' and '.join(illegal_keys))
+
+ for key, requirement in requirements.items():
+ value = data.get(key)
+ error_messages = requirement['error_messages']
+
+            if not value and value is not False and value != 0:
+ if key in data and key not in can_be_empty_keys:
+ return "Invalid data: value of {0} key doesn't exist ".format(key)
+ # check if the key is mandatory
+ mandatory_error = error_messages.get('mandatory')
+ error_message = self.mandatory_check(key,
+ requirement['mandatory'],
+ mandatory_error)
+ if error_message:
+ return error_message
+ continue
+
+ # check the required types
+ error_message = self.types_check(requirement["types"],
+ requirement["convert_to_type"],
+ key,
+ value, data,
+ error_messages.get('types'))
+ if error_message:
+ return error_message
+
+ # after the types check, the value of the key may be changed
+ # get the value again
+ value = data[key]
+ validate = requirement.get('validate')
+ if not validate:
+ continue
+ requirement_value = requirement.get('requirement')
+ # validate the data against the requirement
+ req_error = error_messages.get("requirement")
+ error_message = self.requirement_check(key, value, validate,
+ requirement_value,
+ req_error)
+ if error_message:
+ return error_message
+ return None
+
+ @staticmethod
+ def mandatory_check(key, mandatory, error_message):
+ if mandatory:
+ return error_message if error_message \
+ else "{} must be specified".format(key)
+ return None
+
+ def types_check(self, requirement_types, convert_to_type, key,
+ value, data, error_message):
+ if not isinstance(requirement_types, list):
+ requirement_types = [requirement_types]
+ for requirement_type in requirement_types:
+ converted_val = self.validate_type(
+ value, requirement_type, convert_to_type
+ )
+ if converted_val is not None:
+ if convert_to_type:
+ # value has been converted, update the data
+ data[key] = converted_val
+ return None
+ required_types = self.get_type_names(requirement_types)
+ return error_message if error_message else \
+ "{0} must be {1}".format(key, " or ".join(required_types))
+
+ def requirement_check(self, key, value, validate,
+ requirement, error_message):
+ return self.VALIDATE_SWITCHER[validate](key, value, requirement,
+ error_message)
+
+ @staticmethod
+ def validate_value_in_list(key, value,
+ required_list, error_message):
+ if not isinstance(value, list):
+ value = [value]
+
+ if [v for v in value if v not in required_list]:
+            return error_message if error_message else \
+                "The possible values of {0} are {1}".\
+ format(key, " or ".join(required_list))
+ return None
+
+ # get customized type names from type names array
+ def get_type_names(self, types):
+ return [self.get_type_name(t) for t in types]
+
+ # get customized type name from string <class 'type'>
+ def get_type_name(self, t):
+ t = str(t)
+ a = t.split(" ")[1]
+ type_name = a.rstrip(">").strip("'")
+ # strip the former module names
+ type_name = type_name.split('.')[-1]
+ if type_name in self.TYPES_CUSTOMIZED_NAMES.keys():
+ type_name = self.TYPES_CUSTOMIZED_NAMES[type_name]
+ return type_name
diff --git a/app/api/validation/regex.py b/app/api/validation/regex.py
new file mode 100644
index 0000000..2684636
--- /dev/null
+++ b/app/api/validation/regex.py
@@ -0,0 +1,57 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+PORT = "port number"
+IP = "ipv4/ipv6 address"
+HOSTNAME = "host name"
+PATH = "path"
+
+_PORT_REGEX = re.compile('^0*(?:6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|'
+ '6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{1,3}|[0-9])$')
+
+_HOSTNAME_REGEX = re.compile('^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])'
+ '(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$')
+
+_PATH_REGEX = re.compile('^(\/){1}([^\/\0]+(\/)?)+$')
+
+_IPV4_REGEX = re.compile('^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$')
+_IPV6_REGEX = re.compile('^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}'
+ '(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})$')
+
+_REGEX_MAP = {
+ PORT: _PORT_REGEX,
+ HOSTNAME: _HOSTNAME_REGEX,
+ PATH: _PATH_REGEX,
+ IP: [_IPV4_REGEX, _IPV6_REGEX]
+}
+
+
+def validate(key, value, regex_names, error_message=None):
+ if not isinstance(regex_names, list):
+ regex_names = [regex_names]
+
+ for regex_name in regex_names:
+ regexes = _REGEX_MAP[regex_name]
+
+ if not isinstance(regexes, list):
+ regexes = [regexes]
+
+ try:
+ value = str(value)
+ match_regexes = [regex for regex in regexes
+ if regex.match(value)]
+ if match_regexes:
+ return None
+ except:
+ pass
+
+ return error_message if error_message else \
+ '{0} must be a valid {1}'.format(key, " or ".join(regex_names))
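+
+
+# illustrative usage (not part of the original module):
+#   validate('port', '8080', PORT)          -> None (valid)
+#   validate('host', 'bad host', HOSTNAME)  -> "host must be a valid host name"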
diff --git a/app/config/events.json b/app/config/events.json
new file mode 100644
index 0000000..c067754
--- /dev/null
+++ b/app/config/events.json
@@ -0,0 +1,58 @@
+{
+ "handlers_package": "discover.events",
+ "queues": [
+ {
+ "queue": "notifications.nova",
+ "exchange": "nova"
+ },
+ {
+ "queue": "notifications.neutron",
+ "exchange": "neutron"
+ },
+ {
+ "queue": "notifications.neutron",
+ "exchange": "dhcp_agent"
+ },
+ {
+ "queue": "notifications.info",
+ "exchange": "info"
+ }
+ ],
+ "event_handlers": {
+ "compute.instance.create.end": "EventInstanceAdd",
+ "compute.instance.rebuild.end": "EventInstanceAdd",
+ "compute.instance.update": "EventInstanceUpdate",
+ "compute.instance.delete.end": "EventInstanceDelete",
+ "network.create.start": "EventNetworkAdd",
+ "network.create.end": "EventNetworkAdd",
+ "network.update": "EventNetworkUpdate",
+ "network.update.start": "EventNetworkUpdate",
+ "network.update.end": "EventNetworkUpdate",
+ "network.delete": "EventNetworkDelete",
+ "network.delete.start": "EventNetworkDelete",
+ "network.delete.end": "EventNetworkDelete",
+ "subnet.create": "EventSubnetAdd",
+ "subnet.create.start": "EventSubnetAdd",
+ "subnet.create.end": "EventSubnetAdd",
+ "subnet.update": "EventSubnetUpdate",
+ "subnet.update.start": "EventSubnetUpdate",
+ "subnet.update.end": "EventSubnetUpdate",
+ "subnet.delete": "EventSubnetDelete",
+ "subnet.delete.start": "EventSubnetDelete",
+ "subnet.delete.end": "EventSubnetDelete",
+ "port.create.end": "EventPortAdd",
+ "port.update.end": "EventPortUpdate",
+ "port.delete.end": "EventPortDelete",
+ "router.create": "EventRouterAdd",
+ "router.create.start": "EventRouterAdd",
+ "router.create.end": "EventRouterAdd",
+ "router.update": "EventRouterUpdate",
+ "router.update.start": "EventRouterUpdate",
+ "router.update.end": "EventRouterUpdate",
+ "router.delete": "EventRouterDelete",
+ "router.delete.start": "EventRouterDelete",
+ "router.delete.end": "EventRouterDelete",
+ "router.interface.create": "EventInterfaceAdd",
+ "router.interface.delete": "EventInterfaceDelete"
+ }
+}
\ No newline at end of file
diff --git a/app/config/scanners.json b/app/config/scanners.json
new file mode 100644
index 0000000..3c17918
--- /dev/null
+++ b/app/config/scanners.json
@@ -0,0 +1,370 @@
+{
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanAggregate": [
+ {
+ "type": "host_ref",
+ "fetcher": "DbFetchAggregateHosts"
+ }
+ ],
+ "ScanAggregatesRoot": [
+ {
+ "type": "aggregate",
+ "fetcher": "DbFetchAggregates",
+ "children_scanner": "ScanAggregate"
+ }
+ ],
+ "ScanAvailabilityZone": [
+ {
+ "type": "host",
+ "fetcher": "DbFetchAZNetworkHosts",
+ "children_scanner": "ScanHost"
+ }
+ ],
+ "ScanAvailabilityZonesRoot": [
+ {
+ "type": "availability_zone",
+ "fetcher": "DbFetchAvailabilityZones",
+ "children_scanner": "ScanAvailabilityZone"
+ }
+ ],
+ "ScanEnvironment": [
+ {
+ "type": "regions_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "regions",
+ "parent_type": "environment"
+ },
+ "children_scanner": "ScanRegionsRoot"
+ },
+ {
+ "type": "projects_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "projects",
+ "parent_type": "environment"
+ },
+ "children_scanner": "ScanProjectsRoot"
+ }
+ ],
+ "ScanHostNetworkAgentsRoot": [
+ {
+ "type": "network_agent",
+ "fetcher": "DbFetchHostNetworkAgents"
+ }
+ ],
+ "ScanHost": [
+ {
+ "_comment": "creating only top folder for vServices",
+ "type": "vservices_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "vservices",
+ "parent_type": "host",
+ "text": "vServices"
+ },
+ "children_scanner": "ScanInstancesRoot"
+ },
+ {
+ "type": "vservice",
+ "fetcher": "CliFetchHostVservices"
+ },
+ {
+ "_comment": "fetch vService vNICs from host for efficiency",
+ "type": "vnic",
+ "fetcher": "CliFetchVserviceVnics"
+ },
+ {
+ "type": "instances_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "instances",
+ "parent_type": "host"
+ },
+ "children_scanner": "ScanInstancesRoot"
+ },
+ {
+ "type": "pnics_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "pnics",
+ "parent_type": "host",
+ "text": "pNICs"
+ },
+ "environment_condition": {
+ "mechanism_drivers": [
+ "OVS",
+ "LXB"
+ ]
+ },
+ "children_scanner": "ScanPnicsRoot"
+ },
+ {
+ "type": "vconnectors_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "vconnectors",
+ "parent_type": "host",
+ "text": "vConnectors"
+ },
+ "children_scanner": "ScanVconnectorsRoot"
+ },
+ {
+ "type": "vedges_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "vedges",
+ "parent_type": "host",
+ "text": "vEdges"
+ },
+ "children_scanner": "ScanVedgesRoot"
+ },
+ {
+ "type": "network_agents_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "network_agents",
+ "parent_type": "host",
+ "text": "Network agents"
+ },
+ "children_scanner": "ScanHostNetworkAgentsRoot"
+ }
+ ],
+ "ScanInstance": [
+ {
+ "type": "vnics_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "vnics",
+ "parent_type": "instance",
+ "text": "vNICs"
+ },
+ "children_scanner": "ScanVnicsRoot"
+ }
+ ],
+ "ScanInstancesRoot": [
+ {
+ "type": "instance",
+ "fetcher": "ApiFetchHostInstances",
+ "children_scanner": "ScanInstance"
+ }
+ ],
+ "ScanNetworkAgentsRoot": [
+ {
+ "type": "network_agent",
+ "fetcher": "DbFetchHostNetworkAgents"
+ }
+ ],
+ "ScanNetwork": [
+ {
+ "type": "ports_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "ports",
+ "parent_type": "network"
+ }
+ },
+ {
+ "type": "network_services_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "network_services",
+ "parent_type": "network",
+ "text": "Network vServices"
+ }
+ }
+ ],
+ "ScanNetworksRoot": [
+ {
+ "type": "network",
+ "fetcher": "ApiFetchNetworks",
+ "children_scanner": "ScanNetwork"
+ },
+ {
+ "type": "port",
+ "fetcher": "ApiFetchPorts"
+ }
+ ],
+ "ScanOteps": [
+ {
+ "type": "otep",
+ "environment_condition": {
+ "mechanism_drivers": "OVS"
+ },
+ "fetcher": "DbFetchOteps"
+ },
+ {
+ "type": "otep",
+ "environment_condition": {
+ "mechanism_drivers": "VPP"
+ },
+ "fetcher": "DbFetchOteps"
+ },
+ {
+ "type": "otep",
+ "environment_condition": {
+ "mechanism_drivers": "LXB"
+ },
+ "fetcher": "CliFetchOtepsLxb"
+ }
+ ],
+ "ScanPnicsRoot": [
+ {
+ "type": "pnic",
+ "environment_condition": {
+ "mechanism_drivers": [
+ "OVS",
+ "LXB"
+ ]
+ },
+ "fetcher": "CliFetchHostPnics",
+ "children_scanner": "ScanHostPnic"
+ }
+ ],
+ "ScanHostPnic": [
+ {
+ "type": "pnic",
+ "fetcher": "AciFetchSwitchPnic"
+ }
+ ],
+ "ScanProject": [
+ {
+ "type": "availability_zone",
+ "fetcher": "ApiFetchAvailabilityZones"
+ },
+ {
+ "type": "host",
+ "fetcher": "ApiFetchProjectHosts",
+ "children_scanner": "ScanHost"
+ }
+ ],
+ "ScanProjectsRoot": [
+ {
+ "type": "project",
+ "fetcher": "ApiFetchProjects",
+ "object_id_to_use_in_child": "name",
+ "children_scanner": "ScanProject"
+ }
+ ],
+ "ScanRegion": [
+ {
+ "type": "aggregates_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "aggregates",
+ "parent_type": "region"
+ },
+ "children_scanner": "ScanAggregatesRoot"
+ },
+ {
+ "type": "network",
+ "fetcher": "ApiFetchNetworks",
+ "children_scanner": "ScanNetwork"
+ },
+ {
+ "type": "port",
+ "fetcher": "ApiFetchPorts"
+ }
+ ],
+ "ScanRegionsRoot": [
+ {
+ "type": "region",
+ "fetcher": "ApiFetchRegions",
+ "children_scanner": "ScanRegion"
+ }
+ ],
+ "ScanVconnectorsRoot": [
+ {
+ "type": "vconnector",
+ "environment_condition": {
+ "mechanism_drivers": "OVS"
+ },
+ "fetcher": "CliFetchVconnectorsOvs"
+ },
+ {
+ "type": "vconnector",
+ "environment_condition": {
+ "mechanism_drivers": "LXB"
+ },
+ "fetcher": "CliFetchVconnectorsLxb",
+ "children_scanner": "ScanOteps"
+ },
+ {
+ "type": "vconnector",
+ "environment_condition": {
+ "mechanism_drivers": "VPP"
+ },
+ "fetcher": "CliFetchVconnectorsVpp"
+ }
+ ],
+ "ScanVedgePnicsRoot": [
+ {
+ "type": "pnics_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "pnics",
+ "parent_type": "vedge",
+ "text": "pNICs"
+ },
+ "environment_condition": {
+ "mechanism_drivers": "VPP"
+ },
+ "children_scanner": "ScanVppPnicsRoot"
+ }
+ ],
+ "ScanVedgesRoot": [
+ {
+ "type": "vedge",
+ "fetcher": "DbFetchVedgesOvs",
+ "environment_condition": {
+ "mechanism_drivers": "OVS"
+ },
+ "children_scanner": "ScanOteps"
+ },
+ {
+ "type": "vedge",
+ "fetcher": "DbFetchVedgesVpp",
+ "environment_condition": {
+ "mechanism_drivers": "VPP"
+ },
+ "children_scanner": "ScanVedgePnicsRoot"
+ }
+ ],
+ "ScanVnicsRoot": [
+ {
+ "type": "vnic",
+ "environment_condition": {
+ "mechanism_drivers": [
+ "OVS",
+ "LXB"
+ ]
+ },
+ "fetcher": "CliFetchInstanceVnics"
+ },
+ {
+ "type": "vnic",
+ "environment_condition": {
+ "mechanism_drivers": "VPP"
+ },
+ "fetcher": "CliFetchInstanceVnicsVpp"
+ }
+ ],
+ "ScanVppPnicsRoot": [
+ {
+ "type": "pnic",
+ "fetcher": "CliFetchHostPnicsVpp",
+ "environment_condition": {
+ "mechanism_drivers": "VPP"
+ },
+ "children_scanner": "ScanOteps"
+ }
+ ],
+ "ScanVservicesRoot": [
+ {
+ "type": "vservice",
+ "fetcher": "CliFetchHostVservices"
+ }
+ ]
+ }
+}
diff --git a/app/discover/__init__.py b/app/discover/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/discover/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/discover/clique_finder.py b/app/discover/clique_finder.py
new file mode 100644
index 0000000..9b5aad2
--- /dev/null
+++ b/app/discover/clique_finder.py
@@ -0,0 +1,174 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from bson.objectid import ObjectId
+
+from discover.fetcher import Fetcher
+from utils.inventory_mgr import InventoryMgr
+
+
+class CliqueFinder(Fetcher):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.inventory = self.inv.inventory_collection
+ self.links = self.inv.collections["links"]
+ self.clique_types = self.inv.collections["clique_types"]
+ self.clique_types_by_type = {}
+ self.clique_constraints = self.inv.collections["clique_constraints"]
+ self.cliques = self.inv.collections["cliques"]
+
+ def find_cliques_by_link(self, links_list):
+ return self.cliques.find({'links': {'$in': links_list}})
+
+ def find_links_by_source(self, db_id):
+ return self.links.find({'source': db_id})
+
+ def find_links_by_target(self, db_id):
+ return self.links.find({'target': db_id})
+
+ def find_cliques(self):
+ self.log.info("scanning for cliques")
+ clique_types = self.get_clique_types().values()
+ for clique_type in clique_types:
+ self.find_cliques_for_type(clique_type)
+ self.log.info("finished scanning for cliques")
+
+ def get_clique_types(self):
+ if not self.clique_types_by_type:
+ clique_types = self.clique_types.find({"environment": self.get_env()})
+ default_clique_types = \
+ self.clique_types.find({'environment': 'ANY'})
+ for clique_type in clique_types:
+ focal_point_type = clique_type['focal_point_type']
+ self.clique_types_by_type[focal_point_type] = clique_type
+ # if some focal point type does not have an explicit definition in
+ # clique_types for this specific environment, use the default
+ # clique type definition with environment=ANY
+ for clique_type in default_clique_types:
+ focal_point_type = clique_type['focal_point_type']
+ if focal_point_type not in self.clique_types_by_type:
+ self.clique_types_by_type[focal_point_type] = clique_type
+ return self.clique_types_by_type
+
+ def find_cliques_for_type(self, clique_type):
+ focal_point_type = clique_type["focal_point_type"]
+ constraint = self.clique_constraints.find_one({"focal_point_type": focal_point_type})
+ constraints = [] if not constraint else constraint["constraints"]
+ object_type = focal_point_type
+ objects_for_focal_point_type = self.inventory.find({
+ "environment": self.get_env(),
+ "type": object_type
+ })
+ for o in objects_for_focal_point_type:
+ self.construct_clique_for_focal_point(o, clique_type, constraints)
+
+ def rebuild_clique(self, clique):
+ focal_point_db_id = clique['focal_point']
+ o = self.inventory.find_one({'_id': focal_point_db_id})
+ constraint = self.clique_constraints.find_one({"focal_point_type": o['type']})
+ constraints = [] if not constraint else constraint["constraints"]
+ clique_types = self.get_clique_types()
+ clique_type = clique_types[o['type']]
+ new_clique = self.construct_clique_for_focal_point(o, clique_type, constraints)
+ if not new_clique:
+ self.cliques.delete_one({'_id': clique['_id']})
+
+ def construct_clique_for_focal_point(self, o, clique_type, constraints):
+ # keep a hash of nodes in clique that were visited for each type
+ # start from the focal point
+ nodes_of_type = {o["type"]: {str(o["_id"]): 1}}
+ clique = {
+ "environment": self.env,
+ "focal_point": o["_id"],
+ "focal_point_type": o["type"],
+ "links": [],
+ "links_detailed": [],
+ "constraints": {}
+ }
+ for c in constraints:
+ val = o[c] if c in o else None
+ clique["constraints"][c] = val
+ for link_type in clique_type["link_types"]:
+ # check if it's backwards
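+ # link types are named "<from_type>-<to_type>"; if links in this
+ # environment are stored with the reversed type name, traverse them
+ # from target to source instead of source to target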
+ link_type_parts = link_type.split('-')
+ link_type_parts.reverse()
+ link_type_reversed = '-'.join(link_type_parts)
+ matches = self.links.find_one({
+ "environment": self.env,
+ "link_type": link_type_reversed
+ })
+ reversed = True if matches else False
+ if reversed:
+ link_type = link_type_reversed
+ from_type = link_type[:link_type.index("-")]
+ to_type = link_type[link_type.index("-") + 1:]
+ side_to_match = 'target' if reversed else 'source'
+ other_side = 'target' if not reversed else 'source'
+ match_type = to_type if reversed else from_type
+ if match_type not in nodes_of_type.keys():
+ continue
+ other_side_type = to_type if not reversed else from_type
+ for match_point in nodes_of_type[match_type].keys():
+ matches = self.links.find({
+ "environment": self.env,
+ "link_type": link_type,
+ side_to_match: ObjectId(match_point)
+ })
+ for link in matches:
+ id = link["_id"]
+ if id in clique["links"]:
+ continue
+ if not self.check_constraints(clique, link):
+ continue
+ clique["links"].append(id)
+ clique["links_detailed"].append(link)
+ other_side_point = str(link[other_side])
+ if other_side_type not in nodes_of_type:
+ nodes_of_type[other_side_type] = {}
+ nodes_of_type[other_side_type][other_side_point] = 1
+
+ # after adding the links to the clique, create/update the clique
+ if not clique["links"]:
+ return None
+ focal_point_obj = self.inventory.find_one({"_id": clique["focal_point"]})
+ if not focal_point_obj:
+ return None
+ focal_point_obj["clique"] = True
+ focal_point_obj.pop("_id", None)
+ self.cliques.update_one(
+ {
+ "environment": self.get_env(),
+ "focal_point": clique["focal_point"]
+ },
+ {'$set': clique},
+ upsert=True)
+ clique_document = self.inventory.update_one(
+ {"_id": clique["focal_point"]},
+ {'$set': focal_point_obj},
+ upsert=True)
+ return clique_document
+
+ def check_constraints(self, clique, link):
+ if "attributes" not in link:
+ return True
+ attributes = link["attributes"]
+ constraints = clique["constraints"]
+ for c in constraints:
+ if c not in attributes:
+ continue # constraint not applicable to this link
+ constr_values = constraints[c]
+ link_val = attributes[c]
+ if isinstance(constr_values, list):
+ if link_val not in constr_values:
+ return False
+ elif link_val != constraints[c]:
+ return False
+ return True
diff --git a/app/discover/configuration.py b/app/discover/configuration.py
new file mode 100644
index 0000000..c7bc0c0
--- /dev/null
+++ b/app/discover/configuration.py
@@ -0,0 +1,70 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+from utils.mongo_access import MongoAccess
+from utils.singleton import Singleton
+
+
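+ # Singleton metaclass: all users in the process share one Configuration instance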
+class Configuration(metaclass=Singleton):
+ def __init__(self, environments_collection="environments_config"):
+ super().__init__()
+ self.db_client = MongoAccess()
+ self.db = MongoAccess.db
+ self.inv = InventoryMgr()
+ self.collection = self.inv.collections.get(environments_collection)
+ self.env_name = None
+ self.environment = None
+ self.configuration = None
+ self.log = FullLogger()
+
+ def use_env(self, env_name):
+ self.log.info("Configuration taken from environment: {}".format(env_name))
+ self.env_name = env_name
+
+ envs = self.collection.find({"name": env_name})
+ if envs.count() == 0:
+ raise ValueError("use_env: could not find matching environment")
+ if envs.count() > 1:
+ raise ValueError("use_env: found multiple matching environments")
+
+ self.environment = envs[0]
+ self.configuration = self.environment["configuration"]
+
+ def get_env_config(self):
+ return self.environment
+
+ def get_configuration(self):
+ return self.configuration
+
+ def get_env_name(self):
+ return self.env_name
+
+ def update_env(self, values):
+ self.collection.update_one({"name": self.env_name},
+ {'$set': MongoAccess.encode_mongo_keys(values)})
+
+ def get(self, component):
+ try:
+ matches = [c for c in self.configuration if c["name"] == component]
+ except AttributeError:
+ raise ValueError("Configuration: environment not set")
+ if len(matches) == 0:
+ raise IndexError("No matches for configuration component: " + component)
+ if len(matches) > 1:
+ raise IndexError("Found multiple matches for configuration component: " + component)
+ return matches[0]
+
+ def has_network_plugin(self, name):
+ if 'mechanism_drivers' not in self.environment:
+ self.log.error('Environment missing mechanism_drivers definition: ' +
+ self.environment['name'])
+ return False
+ mechanism_drivers = self.environment['mechanism_drivers']
+ return name in mechanism_drivers
diff --git a/app/discover/event_handler.py b/app/discover/event_handler.py
new file mode 100644
index 0000000..12199f8
--- /dev/null
+++ b/app/discover/event_handler.py
@@ -0,0 +1,45 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+from utils.util import ClassResolver
+
+
+class EventHandler:
+
+ def __init__(self, env: str, inventory_collection: str):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.inv.set_collections(inventory_collection)
+ self.env = env
+ self.log = FullLogger(env=env)
+ self.handlers = {}
+
+ def discover_handlers(self, handlers_package: str, event_handlers: dict):
+ if not event_handlers:
+ raise TypeError("Event handlers list is empty")
+
+ for event_name, handler_name in event_handlers.items():
+ handler = ClassResolver.get_instance_of_class(handler_name, handlers_package)
+ if not issubclass(handler.__class__, EventBase):
+ raise TypeError("Event handler '{}' is not a subclass of EventBase"
+ .format(handler_name))
+ if event_name in self.handlers:
+ self.log.warning("A handler is already registered for event type '{}'. Overwriting"
+ .format(event_name))
+ self.handlers[event_name] = handler
+
+ def handle(self, event_name: str, notification: dict) -> EventResult:
+ if event_name not in self.handlers:
+ self.log.info("No handler is able to process event of type '{}'"
+ .format(event_name))
+ return EventResult(result=False, retry=False)
+ return self.handlers[event_name].handle(self.env, notification)
+
diff --git a/app/discover/event_manager.py b/app/discover/event_manager.py
new file mode 100644
index 0000000..ce40ce4
--- /dev/null
+++ b/app/discover/event_manager.py
@@ -0,0 +1,265 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import argparse
+import signal
+import time
+from multiprocessing import Process, Manager as SharedManager
+
+import os
+
+from discover.events.listeners.default_listener import DefaultListener
+from discover.events.listeners.listener_base import ListenerBase
+from discover.manager import Manager
+from utils.constants import OperationalStatus, EnvironmentFeatures
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.file_logger import FileLogger
+from utils.mongo_access import MongoAccess
+
+
+class EventManager(Manager):
+
+ # After EventManager receives a SIGTERM,
+ # it will try to terminate all listeners.
+ # After this delay, a SIGKILL will be sent
+ # to each listener that is still alive.
+ SIGKILL_DELAY = 5 # in seconds
+
+ DEFAULTS = {
+ "mongo_config": "",
+ "collection": "environments_config",
+ "inventory": "inventory",
+ "interval": 5,
+ "loglevel": "INFO"
+ }
+
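+ # maps the environment's 'distribution' value to the listener class used for it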
+ LISTENERS = {
+ 'Mirantis-6.0': DefaultListener,
+ 'Mirantis-7.0': DefaultListener,
+ 'Mirantis-8.0': DefaultListener,
+ 'RDO-Mitaka': DefaultListener,
+ 'RDO-Liberty': DefaultListener,
+ }
+
+ def __init__(self):
+ self.args = self.get_args()
+ super().__init__(log_directory=self.args.log_directory,
+ mongo_config_file=self.args.mongo_config)
+ self.db_client = None
+ self.interval = None
+ self.processes = []
+
+ @staticmethod
+ def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
+ default=EventManager.DEFAULTS["mongo_config"],
+ help="Name of config file with MongoDB server access details")
+ parser.add_argument("-c", "--collection", nargs="?", type=str,
+ default=EventManager.DEFAULTS["collection"],
+ help="Environments collection to read from "
+ "(default: '{}')"
+ .format(EventManager.DEFAULTS["collection"]))
+ parser.add_argument("-y", "--inventory", nargs="?", type=str,
+ default=EventManager.DEFAULTS["inventory"],
+ help="name of inventory collection "
+ "(default: '{}')"
+ .format(EventManager.DEFAULTS["inventory"]))
+ parser.add_argument("-i", "--interval", nargs="?", type=float,
+ default=EventManager.DEFAULTS["interval"],
+ help="Interval between collection polls "
+ "(must be more than {} seconds. Default: {})"
+ .format(EventManager.MIN_INTERVAL,
+ EventManager.DEFAULTS["interval"]))
+ parser.add_argument("-l", "--loglevel", nargs="?", type=str,
+ default=EventManager.DEFAULTS["loglevel"],
+ help="Logging level \n(default: '{}')"
+ .format(EventManager.DEFAULTS["loglevel"]))
+ parser.add_argument("-d", "--log_directory", nargs="?", type=str,
+ default=FileLogger.LOG_DIRECTORY,
+ help="File logger directory \n(default: '{}')"
+ .format(FileLogger.LOG_DIRECTORY))
+ args = parser.parse_args()
+ return args
+
+ def configure(self):
+ self.db_client = MongoAccess()
+ self.inv = InventoryMgr()
+ self.inv.set_collections(self.args.inventory)
+ self.collection = self.db_client.db[self.args.collection]
+ self.interval = max(self.MIN_INTERVAL, self.args.interval)
+ self.log.set_loglevel(self.args.loglevel)
+
+ self.log.info("Started EventManager with following configuration:\n"
+ "Mongo config file path: {0}\n"
+ "Collection: {1}\n"
+ "Polling interval: {2} second(s)"
+ .format(self.args.mongo_config, self.collection.name, self.interval))
+
+ def get_listener(self, env: str):
+ env_config = self.inv.get_env_config(env)
+ return self.LISTENERS.get(env_config.get('distribution'))
+
+ def listen_to_events(self, listener: ListenerBase, env_name: str, process_vars: dict):
+ listener.listen({
+ 'env': env_name,
+ 'mongo_config': self.args.mongo_config,
+ 'inventory': self.args.inventory,
+ 'loglevel': self.args.loglevel,
+ 'environments_collection': self.args.collection,
+ 'process_vars': process_vars
+ })
+
+ def _get_alive_processes(self):
+ return [p for p in self.processes
+ if p['process'].is_alive()]
+
+ # Get all processes that should be terminated
+ def _get_stuck_processes(self, stopped_processes: list):
+ return [p for p in self._get_alive_processes()
+ if p.get("name") in [sp.get("name") for sp in stopped_processes]]
+
+ # Give processes time to finish and kill them if they are stuck
+ def _kill_stuck_processes(self, process_list: list):
+ if self._get_stuck_processes(process_list):
+ time.sleep(self.SIGKILL_DELAY)
+ for process in self._get_stuck_processes(process_list):
+ self.log.info("Killing event listener '{0}'".format(process.get("name")))
+ os.kill(process.get("process").pid, signal.SIGKILL)
+
+ def _get_operational(self, process: dict) -> OperationalStatus:
+ try:
+ return process.get("vars", {})\
+ .get("operational")
+ except:
+ self.log.error("Event listener '{0}' is unreachable".format(process.get("name")))
+ return OperationalStatus.STOPPED
+
+ def _update_operational_status(self, status: OperationalStatus):
+ self.collection.update_many(
+ {"name": {"$in": [process.get("name")
+ for process
+ in self.processes
+ if self._get_operational(process) == status]}},
+ {"$set": {"operational": status.value}}
+ )
+
+ def update_operational_statuses(self):
+ self._update_operational_status(OperationalStatus.RUNNING)
+ self._update_operational_status(OperationalStatus.ERROR)
+ self._update_operational_status(OperationalStatus.STOPPED)
+
+ def cleanup_processes(self):
+ # Query for envs that are no longer eligible for listening
+ # (scanned == false and/or listen == false)
+ dropped_envs = [env['name']
+ for env
+ in self.collection
+ .find(filter={'$or': [{'scanned': False},
+ {'listen': False}]},
+ projection=['name'])]
+
+ live_processes = []
+ stopped_processes = []
+ # Drop already terminated processes
+ # and for all others perform filtering
+ for process in self._get_alive_processes():
+ # If env no longer qualifies for listening,
+ # stop the listener.
+ # Otherwise, keep the process
+ if process['name'] in dropped_envs:
+ self.log.info("Stopping event listener '{0}'".format(process.get("name")))
+ process['process'].terminate()
+ stopped_processes.append(process)
+ else:
+ live_processes.append(process)
+
+ self._kill_stuck_processes(stopped_processes)
+
+ # Update all 'operational' statuses
+ # for processes stopped on the previous step
+ self.collection.update_many(
+ {"name": {"$in": [process.get("name")
+ for process
+ in stopped_processes]}},
+ {"$set": {"operational": OperationalStatus.STOPPED.value}}
+ )
+
+ # Keep the living processes
+ self.processes = live_processes
+
+ def do_action(self):
+ try:
+ while True:
+ # Update "operational" field in db before removing dead processes
+ # so that we keep last statuses of env listeners before they were terminated
+ self.update_operational_statuses()
+
+ # Perform a cleanup that filters out all processes
+ # that are no longer eligible for listening
+ self.cleanup_processes()
+
+ envs = self.collection.find({'scanned': True, 'listen': True})
+
+ # Iterate over environments that don't have an event listener attached
+ for env in filter(lambda e: e['name'] not in
+ map(lambda process: process["name"], self.processes),
+ envs):
+ env_name = env['name']
+
+ if not self.inv.is_feature_supported(env_name, EnvironmentFeatures.LISTENING):
+ self.log.error("Listening is not supported for env '{}'".format(env_name))
+ self.collection.update({"name": env_name},
+ {"$set": {"operational": OperationalStatus.ERROR.value}})
+ continue
+
+ listener = self.get_listener(env_name)
+ if not listener:
+ self.log.error("No listener is defined for env '{}'".format(env_name))
+ self.collection.update({"name": env_name},
+ {"$set": {"operational": OperationalStatus.ERROR.value}})
+ continue
+
+ # A dict that is shared between event manager and newly created env listener
+ process_vars = SharedManager().dict()
+ p = Process(target=self.listen_to_events,
+ args=(listener, env_name, process_vars,),
+ name=env_name)
+ self.processes.append({"process": p, "name": env_name, "vars": process_vars})
+ self.log.info("Starting event listener '{0}'".format(env_name))
+ p.start()
+
+ # Make sure statuses are up-to-date before event manager goes to sleep
+ self.update_operational_statuses()
+ time.sleep(self.interval)
+ finally:
+ # Fetch operational statuses before terminating listeners.
+ # Shared variables won't be available after termination.
+ stopping_processes = [process.get("name")
+ for process
+ in self.processes
+ if self._get_operational(process) != OperationalStatus.ERROR]
+ self._update_operational_status(OperationalStatus.ERROR)
+
+ # Gracefully stop processes
+ for process in self._get_alive_processes():
+ self.log.info("Stopping event listener '{0}'".format(process.get("name")))
+ process.get("process").terminate()
+
+ # Kill all remaining processes
+ self._kill_stuck_processes(self.processes)
+
+ # Updating operational statuses for stopped processes
+ self.collection.update_many(
+ {"name": {"$in": stopping_processes}},
+ {"$set": {"operational": OperationalStatus.STOPPED.value}}
+ )
+
+if __name__ == "__main__":
+ EventManager().run()
diff --git a/app/discover/events/__init__.py b/app/discover/events/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/discover/events/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/discover/events/event_base.py b/app/discover/events/event_base.py
new file mode 100644
index 0000000..6b3b290
--- /dev/null
+++ b/app/discover/events/event_base.py
@@ -0,0 +1,36 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from abc import abstractmethod, ABC
+
+from discover.fetcher import Fetcher
+from utils.inventory_mgr import InventoryMgr
+
+
+class EventResult:
+ def __init__(self,
+ result: bool, retry: bool = False, message: str = None,
+ related_object: str = None,
+ display_context: str = None):
+ self.result = result
+ self.retry = retry
+ self.message = message
+ self.related_object = related_object
+ self.display_context = display_context
+
+
+class EventBase(Fetcher, ABC):
+
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ @abstractmethod
+ def handle(self, env, values) -> EventResult:
+ pass
diff --git a/app/discover/events/event_delete_base.py b/app/discover/events/event_delete_base.py
new file mode 100644
index 0000000..1cf94c3
--- /dev/null
+++ b/app/discover/events/event_delete_base.py
@@ -0,0 +1,60 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from bson.objectid import ObjectId
+
+from discover.clique_finder import CliqueFinder
+from discover.events.event_base import EventBase, EventResult
+
+
+class EventDeleteBase(EventBase):
+
+ def delete_handler(self, env, object_id, object_type) -> EventResult:
+ item = self.inv.get_by_id(env, object_id)
+ if not item:
+ self.log.info('{0} document is not found, aborting {0} delete'.format(object_type))
+ return EventResult(result=False, retry=False)
+
+ db_id = ObjectId(item['_id'])
+ id_path = item['id_path'] + '/'
+
+ # remove related clique
+ clique_finder = CliqueFinder()
+ self.inv.delete('cliques', {'focal_point': db_id})
+
+ # keep related links to do rebuild of cliques using them
+ matched_links_source = clique_finder.find_links_by_source(db_id)
+ matched_links_target = clique_finder.find_links_by_target(db_id)
+
+ links_using_object = []
+ links_using_object.extend([l['_id'] for l in matched_links_source])
+ links_using_object.extend([l['_id'] for l in matched_links_target])
+
+ # find cliques using these links
+ if links_using_object:
+ matched_cliques = clique_finder.find_cliques_by_link(links_using_object)
+ # find cliques using these links and rebuild them
+ for clique in matched_cliques:
+ clique_finder.rebuild_clique(clique)
+
+ # remove all related links
+ self.inv.delete('links', {'source': db_id})
+ self.inv.delete('links', {'target': db_id})
+
+ # remove object itself
+ self.inv.delete('inventory', {'_id': db_id})
+
+ # remove children
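+ # (descendants have id_path values prefixed by this object's id_path)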
+ regexp = re.compile('^' + id_path)
+ self.inv.delete('inventory', {'id_path': {'$regex': regexp}})
+ return EventResult(result=True,
+ related_object=object_id,
+ display_context=object_id)
diff --git a/app/discover/events/event_instance_add.py b/app/discover/events/event_instance_add.py
new file mode 100644
index 0000000..4dd2b20
--- /dev/null
+++ b/app/discover/events/event_instance_add.py
@@ -0,0 +1,45 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+from discover.scanner import Scanner
+
+
+class EventInstanceAdd(EventBase):
+
+ def handle(self, env, values):
+ # find the host, to serve as parent
+ instance_id = values['payload']['instance_id']
+ host_id = values['payload']['host']
+ instances_root_id = host_id + '-instances'
+ instances_root = self.inv.get_by_id(env, instances_root_id)
+ if not instances_root:
+ self.log.info('instances root not found, aborting instance add')
+ return EventResult(result=False, retry=True)
+
+ # scan instance
+ scanner = Scanner()
+ scanner.set_env(env)
+ scanner.scan("ScanInstancesRoot", instances_root,
+ limit_to_child_id=instance_id,
+ limit_to_child_type='instance')
+ scanner.scan_from_queue()
+
+ # scan host
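+ # re-scan the host's vconnectors/vedges folders, which may have changed with the new instance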
+ host = self.inv.get_by_id(env, host_id)
+ scanner.scan('ScanHost', host,
+ limit_to_child_type=['vconnectors_folder',
+ 'vedges_folder'])
+ scanner.scan_from_queue()
+ scanner.scan_links()
+ scanner.scan_cliques()
+
+ return EventResult(result=True,
+ related_object=instance_id,
+ display_context=instance_id)
diff --git a/app/discover/events/event_instance_delete.py b/app/discover/events/event_instance_delete.py
new file mode 100644
index 0000000..714d0c7
--- /dev/null
+++ b/app/discover/events/event_instance_delete.py
@@ -0,0 +1,18 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_delete_base import EventDeleteBase
+
+
+class EventInstanceDelete(EventDeleteBase):
+
+ def handle(self, env, values):
+ # find the corresponding object
+ instance_id = values['payload']['instance_id']
+ return self.delete_handler(env, instance_id, "instance")
diff --git a/app/discover/events/event_instance_update.py b/app/discover/events/event_instance_update.py
new file mode 100644
index 0000000..6231c30
--- /dev/null
+++ b/app/discover/events/event_instance_update.py
@@ -0,0 +1,55 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_instance_add import EventInstanceAdd
+from discover.events.event_instance_delete import EventInstanceDelete
+
+
+class EventInstanceUpdate(EventBase):
+
+ def handle(self, env, values):
+ # find the host, to serve as parent
+ payload = values['payload']
+ instance_id = payload['instance_id']
+ state = payload['state']
+ old_state = payload['old_state']
+
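+ # act only on completed transitions: building->active is an add,
+ # active->deleted is a delete; an interim 'building' state is ignored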
+ if state == 'building':
+ return EventResult(result=False, retry=False)
+
+ if state == 'active' and old_state == 'building':
+ return EventInstanceAdd().handle(env, values)
+
+ if state == 'deleted' and old_state == 'active':
+ return EventInstanceDelete().handle(env, values)
+
+ name = payload['display_name']
+ instance = self.inv.get_by_id(env, instance_id)
+ if not instance:
+ self.log.info('instance document not found, aborting instance update')
+ return EventResult(result=False, retry=True)
+
+ instance['name'] = name
+ instance['object_name'] = name
+ name_path = instance['name_path']
+ instance['name_path'] = name_path[:name_path.rindex('/') + 1] + name
+
+ # TBD: fix name_path for descendants
+ if name_path != instance['name_path']:
+ self.inv.values_replace({
+ "environment": env,
+ "name_path": {"$regex": r"^" + re.escape(name_path + '/')}},
+ {"name_path": {"from": name_path, "to": instance['name_path']}})
+ self.inv.set(instance)
+ return EventResult(result=True,
+ related_object=instance_id,
+ display_context=instance_id)
diff --git a/app/discover/events/event_interface_add.py b/app/discover/events/event_interface_add.py
new file mode 100644
index 0000000..a06ad14
--- /dev/null
+++ b/app/discover/events/event_interface_add.py
@@ -0,0 +1,139 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import time
+
+from functools import partial
+
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_port_add import EventPortAdd
+from discover.events.event_subnet_add import EventSubnetAdd
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.api.api_fetch_regions import ApiFetchRegions
+from discover.fetchers.cli.cli_fetch_host_vservice import CliFetchHostVservice
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scanner import Scanner
+from utils.util import decode_router_id, encode_router_id
+
+
+class EventInterfaceAdd(EventBase):
+
+ def __init__(self):
+ super().__init__()
+ self.delay = 2
+
+ def add_gateway_port(self, env, project, network_name, router_doc, host_id):
+ fetcher = CliFetchHostVservice()
+ fetcher.set_env(env)
+ router_id = router_doc['id']
+ router = fetcher.get_vservice(host_id, router_id)
+ device_id = decode_router_id(router_id)
+ router_doc['gw_port_id'] = router['gw_port_id']
+
+ # add gateway port documents.
+ port_doc = EventSubnetAdd().add_port_document(env, router_doc['gw_port_id'], project_name=project)
+
+ mac_address = port_doc['mac_address'] if port_doc else None
+
+ # add vnic document
+ host = self.inv.get_by_id(env, host_id)
+
+ add_vnic_document = partial(EventPortAdd().add_vnic_document,
+ env=env,
+ host=host,
+ object_id=device_id,
+ object_type='router',
+ network_name=network_name,
+ router_name=router_doc['name'],
+ mac_address=mac_address)
+
+ ret = add_vnic_document()
+ if not ret:
+ time.sleep(self.delay)
+ self.log.info("Wait %s second, and then fetch vnic document again." % self.delay)
+ add_vnic_document()
+
+ def update_router(self, env, project, network_id, network_name, router_doc, host_id):
+ if router_doc:
+ if 'network' in router_doc:
+ if network_id not in router_doc['network']:
+ router_doc['network'].append(network_id)
+ else:
+ router_doc['network'] = [network_id]
+
+ # if gw_port_id is None, add gateway port first.
+ if not router_doc.get('gw_port_id'):
+ self.add_gateway_port(env, project, network_name, router_doc, host_id)
+ else:
+ # check the gateway port document, add it if document does not exist.
+ port = self.inv.get_by_id(env, router_doc['gw_port_id'])
+ if not port:
+ self.add_gateway_port(env, project, network_name, router_doc, host_id)
+ self.inv.set(router_doc)
+ else:
+ self.log.info("router document not found, aborting interface adding")
+
+ def handle(self, env, values):
+ interface = values['payload']['router_interface']
+ project = values['_context_project_name']
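+ # publisher_id has the form "network.<hostname>"; strip the prefix to get the host id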
+ host_id = values["publisher_id"].replace("network.", "", 1)
+ port_id = interface['port_id']
+ subnet_id = interface['subnet_id']
+ router_id = encode_router_id(host_id, interface['id'])
+
+ network_document = self.inv.get_by_field(env, "network", "subnet_ids", subnet_id, get_single=True)
+ if not network_document:
+ self.log.info("network document not found, aborting interface adding")
+ return EventResult(result=False, retry=True)
+ network_name = network_document['name']
+ network_id = network_document['id']
+
+ # add router-interface port document.
+ if len(ApiAccess.regions) == 0:
+ fetcher = ApiFetchRegions()
+ fetcher.set_env(env)
+ fetcher.get(None)
+ port_doc = EventSubnetAdd().add_port_document(env, port_id, network_name=network_name)
+
+ mac_address = port_doc['mac_address'] if port_doc else None
+
+ # add vnic document
+ host = self.inv.get_by_id(env, host_id)
+ router_doc = self.inv.get_by_id(env, router_id)
+
+ add_vnic_document = partial(EventPortAdd().add_vnic_document,
+ env=env,
+ host=host,
+ object_id=interface['id'],
+ object_type='router',
+ network_name=network_name,
+ router_name=router_doc['name'],
+ mac_address=mac_address)
+
+ ret = add_vnic_document()
+ if ret is False:
+ # retry fetching the vnic document: the vnic may be created slightly after the first CLI fetch.
+ time.sleep(self.delay)
+ self.log.info("Wait {} seconds, and then fetch vnic document again.".format(self.delay))
+ add_vnic_document()
+
+ # update the router document: gw_port_id, network.
+ self.update_router(env, project, network_id, network_name, router_doc, host_id)
+
+ # update vservice-vnic, vnic-network,
+ FindLinksForVserviceVnics().add_links(search={"parent_id": router_id})
+ scanner = Scanner()
+ scanner.set_env(env)
+
+ scanner.scan_cliques()
+ self.log.info("Finished router-interface added.")
+
+ return EventResult(result=True,
+ related_object=interface['id'],
+ display_context=network_id)
diff --git a/app/discover/events/event_interface_delete.py b/app/discover/events/event_interface_delete.py
new file mode 100644
index 0000000..b1df978
--- /dev/null
+++ b/app/discover/events/event_interface_delete.py
@@ -0,0 +1,40 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventResult
+from discover.events.event_delete_base import EventDeleteBase
+from discover.events.event_port_delete import EventPortDelete
+from utils.util import encode_router_id
+
+
+class EventInterfaceDelete(EventDeleteBase):
+
+ def handle(self, env, values):
+ interface = values['payload']['router_interface']
+ port_id = interface['port_id']
+ host_id = values["publisher_id"].replace("network.", "", 1)
+ router_id = encode_router_id(host_id, interface['id'])
+
+ # update router document
+ port_doc = self.inv.get_by_id(env, port_id)
+ if not port_doc:
+ self.log.info("Interface deleting handler: port document not found.")
+ return EventResult(result=False, retry=False)
+ network_id = port_doc['network_id']
+
+ router_doc = self.inv.get_by_id(env, router_id)
+ if router_doc and network_id in router_doc.get('network', []):
+ router_doc['network'].remove(network_id)
+ self.inv.set(router_doc)
+
+ # delete port document
+ result = EventPortDelete().delete_port(env, port_id)
+ result.related_object = interface['id']
+ result.display_context = network_id
+ return result
diff --git a/app/discover/events/event_metadata_parser.py b/app/discover/events/event_metadata_parser.py
new file mode 100644
index 0000000..5d09376
--- /dev/null
+++ b/app/discover/events/event_metadata_parser.py
@@ -0,0 +1,75 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from typing import List, Tuple
+
+from utils.metadata_parser import MetadataParser
+
+
+class EventMetadataParser(MetadataParser):
+
+ HANDLERS_PACKAGE = 'handlers_package'
+ QUEUES = 'queues'
+ EVENT_HANDLERS = 'event_handlers'
+
+ REQUIRED_EXPORTS = [HANDLERS_PACKAGE, EVENT_HANDLERS]
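+ # metadata files (such as app/config/events.json) must define handlers_package and event_handlers; queues is optional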
+
+ def __init__(self):
+ super().__init__()
+ self.handlers_package = None
+ self.queues = []
+ self.event_handlers = []
+
+ def get_required_fields(self) -> list:
+ return self.REQUIRED_EXPORTS
+
+ def validate_metadata(self, metadata: dict) -> bool:
+ super().validate_metadata(metadata)
+
+ package = metadata.get(self.HANDLERS_PACKAGE)
+ if not package or not isinstance(package, str):
+ self.add_error("Handlers package '{}' is invalid".format(package))
+
+ event_handlers = metadata.get(self.EVENT_HANDLERS)
+ if not event_handlers or not isinstance(event_handlers, dict):
+ self.add_error("Event handlers attribute is invalid or empty"
+ "(should be a non-empty dict)")
+
+ return len(self.errors) == 0
+
+ def _finalize_parsing(self, metadata):
+ handlers_package = metadata[self.HANDLERS_PACKAGE]
+ queues = metadata.get(self.QUEUES, None)
+ event_handlers = metadata[self.EVENT_HANDLERS]
+
+ # Convert variables to EventHandler-friendly format
+ self.handlers_package = handlers_package
+
+ try:
+ if queues and isinstance(queues, list):
+ self.queues = [{"queue": q["queue"],
+ "exchange": q["exchange"]}
+ for q in queues]
+ except KeyError:
+ self.add_error("Queues variable has invalid format")
+ return
+
+ self.event_handlers = event_handlers
+
+ def parse_metadata_file(self, file_path: str) -> dict:
+ metadata = super().parse_metadata_file(file_path)
+ self._finalize_parsing(metadata)
+ super().check_errors()
+ return metadata
+
+
+def parse_metadata_file(file_path: str):
+ parser = EventMetadataParser()
+ parser.parse_metadata_file(file_path)
+ return parser.handlers_package, parser.queues, parser.event_handlers
diff --git a/app/discover/events/event_network_add.py b/app/discover/events/event_network_add.py
new file mode 100644
index 0000000..41fafd4
--- /dev/null
+++ b/app/discover/events/event_network_add.py
@@ -0,0 +1,50 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+
+
+class EventNetworkAdd(EventBase):
+
+ def handle(self, env, notification):
+ network = notification['payload']['network']
+ network_id = network['id']
+ network_document = self.inv.get_by_id(env, network_id)
+ if network_document:
+ self.log.info('network already exists, aborting network add')
+ return EventResult(result=False, retry=False)
+
+ # build network document for adding network
+ project_name = notification['_context_project_name']
+ project_id = notification['_context_project_id']
+ parent_id = project_id + '-networks'
+ network_name = network['name']
+
+ network['environment'] = env
+ network['type'] = 'network'
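+ # id_path/name_path place the network under its project's Networks folder in the inventory tree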
+ network['id_path'] = "/%s/%s-projects/%s/%s/%s" \
+ % (env, env, project_id, parent_id, network_id)
+ network['cidrs'] = []
+ network['subnet_ids'] = []
+ network['last_scanned'] = notification['timestamp']
+ network['name_path'] = "/%s/Projects/%s/Networks/%s" \
+ % (env, project_name, network_name)
+ network['network'] = network_id
+ network['object_name'] = network_name
+ network['parent_id'] = parent_id
+ network['parent_text'] = "Networks"
+ network['parent_type'] = "networks_folder"
+ network['project'] = project_name
+ network["show_in_tree"] = True
+ network['subnets'] = {}
+
+ self.inv.set(network)
+ return EventResult(result=True,
+ related_object=network_id,
+ display_context=network_id)
diff --git a/app/discover/events/event_network_delete.py b/app/discover/events/event_network_delete.py
new file mode 100644
index 0000000..b3277da
--- /dev/null
+++ b/app/discover/events/event_network_delete.py
@@ -0,0 +1,17 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_delete_base import EventDeleteBase
+
+
+class EventNetworkDelete(EventDeleteBase):
+
+ def handle(self, env, notification):
+ network_id = notification['payload']['network_id']
+ return self.delete_handler(env, network_id, "network")
diff --git a/app/discover/events/event_network_update.py b/app/discover/events/event_network_update.py
new file mode 100644
index 0000000..3e1432e
--- /dev/null
+++ b/app/discover/events/event_network_update.py
@@ -0,0 +1,44 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.events.event_base import EventBase, EventResult
+
+
+class EventNetworkUpdate(EventBase):
+
+ def handle(self, env, notification):
+ network = notification['payload']['network']
+ network_id = network['id']
+
+ network_document = self.inv.get_by_id(env, network_id)
+ if not network_document:
+ self.log.info('Network document not found, aborting network update')
+ return EventResult(result=False, retry=True)
+
+ # update network document
+ name = network['name']
+ if name != network_document['name']:
+ network_document['name'] = name
+ network_document['object_name'] = name
+
+ name_path = network_document['name_path']
+ network_document['name_path'] = name_path[:name_path.rindex('/') + 1] + name
+
+ # TBD: fix name_path for descendants
+ self.inv.values_replace({"environment": env,
+ "name_path": {"$regex": r"^" + re.escape(name_path + '/')}},
+ {"name_path": {"from": name_path, "to": network_document['name_path']}})
+
+ network_document['admin_state_up'] = network['admin_state_up']
+ self.inv.set(network_document)
+ return EventResult(result=True,
+ related_object=network_id,
+ display_context=network_id)
diff --git a/app/discover/events/event_port_add.py b/app/discover/events/event_port_add.py
new file mode 100644
index 0000000..63a5e80
--- /dev/null
+++ b/app/discover/events/event_port_add.py
@@ -0,0 +1,309 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import datetime
+
+from discover.events.event_base import EventBase, EventResult
+from discover.fetchers.api.api_fetch_host_instances import ApiFetchHostInstances
+from discover.fetchers.cli.cli_fetch_instance_vnics import CliFetchInstanceVnics
+from discover.fetchers.cli.cli_fetch_instance_vnics_vpp import CliFetchInstanceVnicsVpp
+from discover.fetchers.cli.cli_fetch_vservice_vnics import CliFetchVserviceVnics
+from discover.find_links_for_instance_vnics import FindLinksForInstanceVnics
+from discover.find_links_for_vedges import FindLinksForVedges
+from discover.scanner import Scanner
+
+
+class EventPortAdd(EventBase):
+
+ def get_name_by_id(self, object_id):
+ item = self.inv.get_by_id(self.env, object_id)
+ if item:
+ return item['name']
+ return None
+
+ def add_port_document(self, env, project_name, project_id, network_name, network_id, port):
+ # add other data for port document
+ port['type'] = 'port'
+ port['environment'] = env
+
+ port['parent_id'] = port['network_id'] + '-ports'
+ port['parent_text'] = 'Ports'
+ port['parent_type'] = 'ports_folder'
+
+ port['name'] = port['mac_address']
+ port['object_name'] = port['name']
+ port['project'] = project_name
+
+ port['id_path'] = "{}/{}-projects/{}/{}-networks/{}/{}-ports/{}" \
+ .format(env, env,
+ project_id, project_id,
+ network_id, network_id, port['id'])
+ port['name_path'] = "/{}/Projects/{}/Networks/{}/Ports/{}" \
+ .format(env, project_name, network_name, port['id'])
+
+ port['show_in_tree'] = True
+ port['last_scanned'] = datetime.datetime.utcnow()
+ self.inv.set(port)
+ self.log.info("add port document for port: {}".format(port['id']))
+
+ def add_ports_folder(self, env, project_id, network_id, network_name):
+ port_folder = {
+ "id": network_id + "-ports",
+ "create_object": True,
+ "name": "Ports",
+ "text": "Ports",
+ "type": "ports_folder",
+ "parent_id": network_id,
+ "parent_type": "network",
+ 'environment': env,
+ 'id_path': "{}/{}-projects/{}/{}-networks/{}/{}-ports/"
+ .format(env, env, project_id, project_id,
+ network_id, network_id),
+ 'name_path': "/{}/Projects/{}/Networks/{}/Ports"
+ .format(env, project_id, network_name),
+ "show_in_tree": True,
+ "last_scanned": datetime.datetime.utcnow(),
+ "object_name": "Ports",
+ }
+ self.inv.set(port_folder)
+ self.log.info("add ports_folder document for network: {}.".format(network_id))
+
+ def add_network_services_folder(self, env, project_id, network_id, network_name):
+ network_services_folder = {
+ "create_object": True,
+ "environment": env,
+ "id": network_id + "-network_services",
+ "id_path": "{}/{}-projects/{}/{}-networks/{}/{}-network_services/"
+ .format(env, env, project_id, project_id,
+ network_id, network_id),
+ "last_scanned": datetime.datetime.utcnow(),
+ "name": "Network vServices",
+ "name_path": "/{}/Projects/{}/Networks/{}/Network vServices"
+ .format(env, project_id, network_name),
+ "object_name": "Network vServices",
+ "parent_id": network_id,
+ "parent_type": "network",
+ "show_in_tree": True,
+ "text": "Network vServices",
+ "type": "network_services_folder"
+ }
+ self.inv.set(network_services_folder)
+ self.log.info("add network services folder for network:{}".format(network_id))
+
+ def add_dhcp_document(self, env, host, network_id, network_name):
+ dhcp_document = {
+ "environment": env,
+ "host": host['id'],
+ "id": "qdhcp-" + network_id,
+ "id_path": "{}/{}-vservices/{}-vservices-dhcps/qdhcp-{}"
+ .format(host['id_path'], host['id'],
+ host['id'], network_id),
+ "last_scanned": datetime.datetime.utcnow(),
+ "local_service_id": "qdhcp-" + network_id,
+ "name": "dhcp-" + network_name,
+ "name_path": host['name_path'] + "/Vservices/DHCP servers/dhcp-" + network_name,
+ "network": [network_id],
+ "object_name": "dhcp-" + network_name,
+ "parent_id": host['id'] + "-vservices-dhcps",
+ "parent_text": "DHCP servers",
+ "parent_type": "vservice_dhcps_folder",
+ "service_type": "dhcp",
+ "show_in_tree": True,
+ "type": "vservice"
+ }
+ self.inv.set(dhcp_document)
+ self.log.info("add DHCP document for network: {}.".format(network_id))
+
+ # This method has dynamic usages, take caution when changing its signature
+ def add_vnics_folder(self,
+ env, host,
+ object_id, network_name='',
+ object_type="dhcp", router_name=''):
+ # when vservice is DHCP, id = network_id,
+ # when vservice is router, id = router_id
+ type_map = {"dhcp": ('DHCP servers', 'dhcp-' + network_name),
+ "router": ('Gateways', router_name)}
+
+ vnics_folder = {
+ "environment": env,
+ "id": "q{}-{}-vnics".format(object_type, object_id),
+ "id_path": "{}/{}-vservices/{}-vservices-{}s/q{}-{}/q{}-{}-vnics"
+ .format(host['id_path'], host['id'], host['id'],
+ object_type, object_type, object_id,
+ object_type, object_id),
+ "last_scanned": datetime.datetime.utcnow(),
+ "name": "q{}-{}-vnics".format(object_type, object_id),
+ "name_path": "{}/Vservices/{}/{}/vNICs"
+ .format(host['name_path'],
+ type_map[object_type][0],
+ type_map[object_type][1]),
+ "object_name": "vNICs",
+ "parent_id": "q{}-{}".format(object_type, object_id),
+ "parent_type": "vservice",
+ "show_in_tree": True,
+ "text": "vNICs",
+ "type": "vnics_folder"
+ }
+ self.inv.set(vnics_folder)
+ self.log.info("add vnics_folder document for q{}-{}-vnics"
+ .format(object_type, object_id))
+
+ # This method has dynamic usages, take caution when changing its signature
+ def add_vnic_document(self,
+ env, host,
+ object_id, network_name='',
+ object_type='dhcp', router_name='',
+ mac_address=None):
+ # when vservice is DHCP, id = network_id,
+ # when vservice is router, id = router_id
+ type_map = {"dhcp": ('DHCP servers', 'dhcp-' + network_name),
+ "router": ('Gateways', router_name)}
+
+ fetcher = CliFetchVserviceVnics()
+ fetcher.set_env(env)
+ namespace = 'q{}-{}'.format(object_type, object_id)
+ vnic_documents = fetcher.handle_service(host['id'], namespace, enable_cache=False)
+ if not vnic_documents:
+ self.log.info("Vnic document not found in namespace.")
+ return False
+
+ if mac_address is not None:
+ for doc in vnic_documents:
+ if doc['mac_address'] == mac_address:
+ # add a specific vnic document.
+ doc["environment"] = env
+ doc["id_path"] = "{}/{}-vservices/{}-vservices-{}s/{}/{}-vnics/{}"\
+ .format(host['id_path'], host['id'],
+ host['id'], object_type, namespace,
+ namespace, doc["id"])
+ doc["name_path"] = "{}/Vservices/{}/{}/vNICs/{}" \
+ .format(host['name_path'],
+ type_map[object_type][0],
+ type_map[object_type][1],
+ doc["id"])
+ self.inv.set(doc)
+ self.log.info("add vnic document with mac_address: {}."
+ .format(mac_address))
+ return True
+
+ self.log.info("Can not find vnic document by mac_address: {}"
+ .format(mac_address))
+ return False
+ else:
+ for doc in vnic_documents:
+ # add all vnic documents.
+ doc["environment"] = env
+ doc["id_path"] = "{}/{}-vservices/{}-vservices-{}s/{}/{}-vnics/{}" \
+ .format(host['id_path'], host['id'],
+ host['id'], object_type,
+ namespace, namespace, doc["id"])
+ doc["name_path"] = "{}/Vservices/{}/{}/vNICs/{}" \
+ .format(host['name_path'],
+ type_map[object_type][0],
+ type_map[object_type][1],
+ doc["id"])
+ self.inv.set(doc)
+ self.log.info("add vnic document with mac_address: {}."
+ .format(doc["mac_address"]))
+ return True
+
+ def handle_dhcp_device(self, env, notification, network_id, network_name, mac_address=None):
+ # add dhcp vservice document.
+ host_id = notification["publisher_id"].replace("network.", "", 1)
+ host = self.inv.get_by_id(env, host_id)
+
+ self.add_dhcp_document(env, host, network_id, network_name)
+
+ # add vnics folder.
+ self.add_vnics_folder(env, host, network_id, network_name)
+
+ # add vnic document.
+ self.add_vnic_document(env, host, network_id, network_name, mac_address=mac_address)
+
+ def handle(self, env, notification):
+ project = notification['_context_project_name']
+ project_id = notification['_context_project_id']
+ payload = notification['payload']
+ port = payload['port']
+ network_id = port['network_id']
+ network_name = self.get_name_by_id(network_id)
+ mac_address = port['mac_address']
+
+ # check ports folder document.
+ ports_folder = self.inv.get_by_id(env, network_id + '-ports')
+ if not ports_folder:
+ self.log.info("ports folder not found, add ports folder first.")
+ self.add_ports_folder(env, project_id, network_id, network_name)
+ self.add_port_document(env, project, project_id, network_name, network_id, port)
+
+ # update the port related documents.
+ if 'compute' in port['device_owner']:
+ # update the instance related document.
+ host_id = port['binding:host_id']
+ instance_id = port['device_id']
+ old_instance_doc = self.inv.get_by_id(env, instance_id)
+ instances_root_id = host_id + '-instances'
+ instances_root = self.inv.get_by_id(env, instances_root_id)
+ if not instances_root:
+ self.log.info('instance document not found, aborting port adding')
+ return EventResult(result=False, retry=True)
+
+ # update instance
+ instance_fetcher = ApiFetchHostInstances()
+ instance_fetcher.set_env(env)
+ instance_docs = instance_fetcher.get(host_id + '-')
+ instance = next(filter(lambda i: i['id'] == instance_id, instance_docs), None)
+
+ if instance:
+ old_instance_doc['network_info'] = instance['network_info']
+ old_instance_doc['network'] = instance['network']
+ if old_instance_doc.get('mac_address') is None:
+ old_instance_doc['mac_address'] = mac_address
+
+ self.inv.set(old_instance_doc)
+ self.log.info("update instance document")
+
+ # add vnic document.
+ if port['binding:vif_type'] == 'vpp':
+ vnic_fetcher = CliFetchInstanceVnicsVpp()
+ else:
+ # set ovs as default type.
+ vnic_fetcher = CliFetchInstanceVnics()
+
+ vnic_fetcher.set_env(env)
+ vnic_docs = vnic_fetcher.get(instance_id + '-')
+ vnic = next(filter(lambda vnic: vnic['mac_address'] == mac_address, vnic_docs), None)
+
+ if vnic:
+ vnic['environment'] = env
+ vnic['type'] = 'vnic'
+ vnic['name_path'] = old_instance_doc['name_path'] + '/vNICs/' + vnic['name']
+ vnic['id_path'] = '{}/{}/{}'.format(old_instance_doc['id_path'],
+ old_instance_doc['id'],
+ vnic['name'])
+ self.inv.set(vnic)
+ self.log.info("add instance-vnic document, mac_address: {}"
+ .format(mac_address))
+
+ self.log.info("scanning for links")
+ fetchers_implementing_add_links = [FindLinksForInstanceVnics(), FindLinksForVedges()]
+ for fetcher in fetchers_implementing_add_links:
+ fetcher.add_links()
+ scanner = Scanner()
+ scanner.set_env(env)
+ scanner.scan_cliques()
+
+ port_document = self.inv.get_by_id(env, port['id'])
+ if not port_document:
+ self.log.error("Port {} failed to add".format(port['id']))
+ return EventResult(result=False, retry=True)
+
+ return EventResult(result=True,
+ related_object=port['id'],
+ display_context=network_id)
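For reference, the handler above only reads a handful of fields from the Neutron notification. A minimal sketch of the payload shape it expects, with key names taken from the code and purely illustrative values:

    # illustrative notification consumed by EventPortAdd.handle(); values are made up
    notification = {
        "_context_project_name": "admin",
        "_context_project_id": "t-1",
        "publisher_id": "network.node-1.example.com",
        "payload": {
            "port": {
                "id": "p-1",
                "network_id": "n-1",
                "mac_address": "fa:16:3e:00:00:01",
                "device_owner": "compute:nova",
                "device_id": "i-1",
                "binding:host_id": "node-1.example.com",
                "binding:vif_type": "ovs",
            }
        }
    }

Passing such a dict to EventPortAdd().handle(env, notification) adds the port document, updates the owning instance and rescans links and cliques, as implemented above.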
diff --git a/app/discover/events/event_port_delete.py b/app/discover/events/event_port_delete.py
new file mode 100644
index 0000000..1e55870
--- /dev/null
+++ b/app/discover/events/event_port_delete.py
@@ -0,0 +1,80 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventResult
+from discover.events.event_delete_base import EventDeleteBase
+from discover.fetchers.api.api_fetch_host_instances import ApiFetchHostInstances
+
+
+class EventPortDelete(EventDeleteBase):
+
+ def delete_port(self, env, port_id):
+ port_doc = self.inv.get_by_id(env, port_id)
+ if not port_doc:
+ self.log.info("Port document not found, aborting port deleting.")
+ return EventResult(result=False, retry=False)
+
+ # if the port is bound to an instance, the instance document needs to be updated.
+ if 'compute' in port_doc['device_owner']:
+ self.log.info("update instance document to which port is binding.")
+ self.update_instance(env, port_doc)
+
+ # delete port document
+ self.inv.delete('inventory', {'id': port_id})
+
+ # delete vnic and related document
+ vnic_doc = self.inv.get_by_field(env, 'vnic', 'mac_address', port_doc['mac_address'], get_single=True)
+ if not vnic_doc:
+ self.log.info("Vnic document not found, aborting vnic deleting.")
+ return EventResult(result=False, retry=False)
+
+ result = self.delete_handler(env, vnic_doc['id'], 'vnic')
+ result.related_object = port_id
+ result.display_context = port_doc.get('network_id')
+ self.log.info('Finished port deleting')
+ return result
+
+ def update_instance(self, env, port_doc):
+ # update the instance document when one of its ports is deleted
+ network_id = port_doc['network_id']
+ instance_doc = self.inv.get_by_field(env, 'instance', 'network_info.id', port_doc['id'], get_single=True)
+ if instance_doc:
+ port_num = 0
+
+ for port in instance_doc['network_info']:
+ if port['network']['id'] == network_id:
+ port_num += 1
+ if port['id'] == port_doc['id']:
+ instance_doc['network_info'].remove(port)
+ self.log.info("update network information of instance document.")
+
+ if port_num == 1:
+ # remove network information only when the last port in the network is being deleted.
+ instance_doc['network'].remove(network_id)
+
+ # update instance mac address.
+ if port_doc['mac_address'] == instance_doc['mac_address']:
+ instance_fetcher = ApiFetchHostInstances()
+ instance_fetcher.set_env(env)
+ host_id = port_doc['binding:host_id']
+ instance_id = port_doc['device_id']
+ instance_docs = instance_fetcher.get(host_id + '-')
+ instance = next(filter(lambda i: i['id'] == instance_id, instance_docs), None)
+ if instance:
+ if 'mac_address' not in instance:
+ instance_doc['mac_address'] = None
+ self.log.info("update mac_address:%s of instance document." % instance_doc['mac_address'])
+
+ self.inv.set(instance_doc)
+ else:
+ self.log.info("No instance document binding to network:%s." % network_id)
+
+ def handle(self, env, notification):
+ port_id = notification['payload']['port_id']
+ return self.delete_port(env, port_id)
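The delete handler needs only the port id from the payload. A minimal, illustrative invocation (the environment name and port id are placeholders):

    env = "Mirantis-Liberty"                        # placeholder environment name
    notification = {"payload": {"port_id": "p-1"}}  # illustrative id
    EventPortDelete().handle(env, notification)     # delegates to delete_port(env, "p-1")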
diff --git a/app/discover/events/event_port_update.py b/app/discover/events/event_port_update.py
new file mode 100644
index 0000000..298b565
--- /dev/null
+++ b/app/discover/events/event_port_update.py
@@ -0,0 +1,38 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+
+
+class EventPortUpdate(EventBase):
+
+ def handle(self, env, notification):
+ # check port document.
+ port = notification['payload']['port']
+ port_id = port['id']
+ port_document = self.inv.get_by_id(env, port_id)
+ if not port_document:
+ self.log.info('port document does not exist, aborting port update')
+ return EventResult(result=False, retry=True)
+
+ # build port document
+ port_document['name'] = port['name']
+ port_document['admin_state_up'] = port['admin_state_up']
+ if port_document['admin_state_up']:
+ port_document['status'] = 'ACTIVE'
+ else:
+ port_document['status'] = 'DOWN'
+
+ port_document['binding:vnic_type'] = port['binding:vnic_type']
+
+ # update port document.
+ self.inv.set(port_document)
+ return EventResult(result=True,
+ related_object=port_id,
+ display_context=port_document.get('network_id'))
diff --git a/app/discover/events/event_router_add.py b/app/discover/events/event_router_add.py
new file mode 100644
index 0000000..20e07e5
--- /dev/null
+++ b/app/discover/events/event_router_add.py
@@ -0,0 +1,123 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import datetime
+
+from functools import partial
+
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_port_add import EventPortAdd
+from discover.events.event_subnet_add import EventSubnetAdd
+from discover.fetchers.cli.cli_fetch_host_vservice import CliFetchHostVservice
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scanner import Scanner
+from utils.util import decode_router_id, encode_router_id
+
+
+class EventRouterAdd(EventBase):
+
+ def add_router_document(self, env, network_id, router_doc, host):
+ router_doc["environment"] = env
+ router_doc["id_path"] = "{}/{}-vservices/{}-vservices-routers/{}"\
+ .format(host['id_path'], host['id'],
+ host['id'], router_doc['id'])
+ router_doc['last_scanned'] = datetime.datetime.utcnow()
+ router_doc['name_path'] = "{}/Vservices/Gateways/{}"\
+ .format(host['name_path'],
+ router_doc['name'])
+ router_doc['network'] = []
+ if network_id:
+ router_doc['network'] = [network_id]
+
+ router_doc['object_name'] = router_doc['name']
+ router_doc['parent_id'] = host['id'] + "-vservices-routers"
+ router_doc['show_in_tree'] = True
+ router_doc['type'] = "vservice"
+
+ self.inv.set(router_doc)
+
+ def add_children_documents(self, env, project_id, network_id, host, router_doc):
+
+ network_document = self.inv.get_by_id(env, network_id)
+ network_name = network_document['name']
+ router_id = decode_router_id(router_doc['id'])
+
+ # add port for binding to vservice:router
+ subnet_handler = EventSubnetAdd()
+ ports_folder = self.inv.get_by_id(env, network_id + '-ports')
+ if not ports_folder:
+ self.log.info("Ports_folder not found.")
+ subnet_handler.add_ports_folder(env, project_id, network_id, network_name)
+ add_port_return = subnet_handler.add_port_document(env,
+ router_doc['gw_port_id'],
+ network_name=network_name)
+
+ # add vnics folder and vnic document
+ port_handler = EventPortAdd()
+ add_vnic_folder = partial(port_handler.add_vnics_folder,
+ env=env,
+ host=host,
+ object_id=router_id,
+ object_type='router',
+ network_name=network_name,
+ router_name=router_doc['name'])
+ add_vnic_document = partial(port_handler.add_vnic_document,
+ env=env,
+ host=host,
+ object_id=router_id,
+ object_type='router',
+ network_name=network_name,
+ router_name=router_doc['name'])
+
+ add_vnic_folder()
+ if add_port_return:
+ add_vnic_return = add_vnic_document()
+ if not add_vnic_return:
+ self.log.info("Try to add vnic document again.")
+ add_vnic_document()
+ else:
+ # in some cases, port has been created,
+ # but port doc cannot be fetched by OpenStack API
+ self.log.info("Try to add port document again.")
+ # TODO: #AskCheng - this never returns anything!
+ add_port_return = add_vnic_folder()
+ # TODO: #AskCheng - this will never evaluate to True!
+ if add_port_return is False:
+ self.log.info("Try to add vnic document again.")
+ add_vnic_document()
+
+ def handle(self, env, values):
+ router = values['payload']['router']
+ host_id = values["publisher_id"].replace("network.", "", 1)
+ project_id = values['_context_project_id']
+ router_id = encode_router_id(host_id, router['id'])
+ host = self.inv.get_by_id(env, host_id)
+
+ fetcher = CliFetchHostVservice()
+ fetcher.set_env(env)
+ router_doc = fetcher.get_vservice(host_id, router_id)
+ gateway_info = router['external_gateway_info']
+
+ if gateway_info:
+ network_id = gateway_info['network_id']
+ self.add_router_document(env, network_id, router_doc, host)
+ self.add_children_documents(env, project_id, network_id, host, router_doc)
+ else:
+ self.add_router_document(env, None, router_doc, host)
+
+ # scan links and cliques
+ FindLinksForVserviceVnics().add_links(search={"parent_id": router_id})
+ scanner = Scanner()
+ scanner.set_env(env)
+ scanner.scan_cliques()
+ self.log.info("Finished router added.")
+
+ return EventResult(result=True,
+ related_object=router_id,
+ display_context=router_id)
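add_children_documents() binds the EventPortAdd helper arguments once with functools.partial and then calls the resulting callables without repeating them. A standalone sketch of the same pattern (the function and values here are illustrative, not part of the module):

    from functools import partial

    def add_vnics_folder(env, host, object_id, network_name='',
                         object_type='dhcp', router_name=''):
        # stand-in for EventPortAdd.add_vnics_folder, just to show the binding
        print(env, host['id'], object_id, object_type, router_name)

    add_folder = partial(add_vnics_folder,
                         env='env1', host={'id': 'host-1'},
                         object_id='router-1', object_type='router',
                         network_name='net-1', router_name='gw-1')
    add_folder()   # all arguments were bound above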
diff --git a/app/discover/events/event_router_delete.py b/app/discover/events/event_router_delete.py
new file mode 100644
index 0000000..65072d6
--- /dev/null
+++ b/app/discover/events/event_router_delete.py
@@ -0,0 +1,37 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventResult
+from discover.events.event_delete_base import EventDeleteBase
+from utils.util import encode_router_id
+
+
+class EventRouterDelete(EventDeleteBase):
+
+ def handle(self, env, values):
+ payload = values['payload']
+
+ if 'publisher_id' not in values:
+ self.log.error("Publisher_id is not in event values. Aborting router delete")
+ return EventResult(result=False, retry=False)
+
+ host_id = values['publisher_id'].replace('network.', '', 1)
+ if 'router_id' in payload:
+ router_id = payload['router_id']
+ elif 'id' in payload:
+ router_id = payload['id']
+ else:
+ router_id = payload.get('router', {}).get('id')
+
+ if not router_id:
+ self.log.error("Router id is not in payload. Aborting router delete")
+ return EventResult(result=False, retry=False)
+
+ router_full_id = encode_router_id(host_id, router_id)
+ return self.delete_handler(env, router_full_id, "vservice")
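The handler accepts three payload variants for the router id. A standalone sketch of the same fallback order (ids are illustrative; the handler itself uses 'in' checks rather than chained get() calls):

    payloads = [
        {"router_id": "r-1"},
        {"id": "r-1"},
        {"router": {"id": "r-1"}},
    ]
    for payload in payloads:
        router_id = payload.get("router_id") or payload.get("id") \
            or payload.get("router", {}).get("id")
        assert router_id == "r-1"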
diff --git a/app/discover/events/event_router_update.py b/app/discover/events/event_router_update.py
new file mode 100644
index 0000000..8dd53f0
--- /dev/null
+++ b/app/discover/events/event_router_update.py
@@ -0,0 +1,82 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_port_delete import EventPortDelete
+from discover.events.event_router_add import EventRouterAdd
+from discover.fetchers.cli.cli_fetch_host_vservice import CliFetchHostVservice
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scanner import Scanner
+from utils.util import encode_router_id
+
+
+class EventRouterUpdate(EventBase):
+
+ def handle(self, env, values):
+ payload = values['payload']
+ router = payload['router']
+
+ project_id = values['_context_project_id']
+ host_id = values["publisher_id"].replace("network.", "", 1)
+ router_id = payload['id'] if 'id' in payload else router['id']
+
+ router_full_id = encode_router_id(host_id, router_id)
+ router_doc = self.inv.get_by_id(env, router_full_id)
+ if not router_doc:
+ self.log.info("Router document not found, aborting router updating")
+ return EventResult(result=False, retry=True)
+
+ router_doc['admin_state_up'] = router['admin_state_up']
+ router_doc['name'] = router['name']
+ gateway_info = router.get('external_gateway_info')
+ if gateway_info is None:
+ # when the gateway is deleted, the related port document needs to be deleted as well.
+ port_doc = {}
+ if router_doc.get('gw_port_id'):
+ port_doc = self.inv.get_by_id(env, router_doc['gw_port_id'])
+ EventPortDelete().delete_port(env, router_doc['gw_port_id'])
+
+ if router_doc.get('network'):
+ if port_doc:
+ router_doc['network'].remove(port_doc['network_id'])
+ router_doc['gw_port_id'] = None
+
+ # remove related links
+ self.inv.delete('links', {'source_id': router_full_id})
+ else:
+ if 'network' in router_doc:
+ if gateway_info['network_id'] not in router_doc['network']:
+ router_doc['network'].append(gateway_info['network_id'])
+ else:
+ router_doc['network'] = [gateway_info['network_id']]
+ # update static route
+ router_doc['routes'] = router['routes']
+
+ # add gw_port_id info and port document.
+ fetcher = CliFetchHostVservice()
+ fetcher.set_env(env)
+ router_vservice = fetcher.get_vservice(host_id, router_full_id)
+ if router_vservice.get('gw_port_id'):
+ router_doc['gw_port_id'] = router_vservice['gw_port_id']
+
+ host = self.inv.get_by_id(env, host_id)
+ EventRouterAdd().add_children_documents(env, project_id, gateway_info['network_id'], host, router_doc)
+
+ # rescan the vnic links.
+ FindLinksForVserviceVnics().add_links(search={'parent_id': router_full_id + '-vnics'})
+ self.inv.set(router_doc)
+
+ # update the cliques.
+ scanner = Scanner()
+ scanner.set_env(env)
+ scanner.scan_cliques()
+ self.log.info("Finished router update.")
+ return EventResult(result=True,
+ related_object=router_full_id,
+ display_context=router_full_id)
diff --git a/app/discover/events/event_subnet_add.py b/app/discover/events/event_subnet_add.py
new file mode 100644
index 0000000..b519b1c
--- /dev/null
+++ b/app/discover/events/event_subnet_add.py
@@ -0,0 +1,154 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import datetime
+
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_port_add import EventPortAdd
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.api.api_fetch_port import ApiFetchPort
+from discover.fetchers.api.api_fetch_regions import ApiFetchRegions
+from discover.fetchers.db.db_fetch_port import DbFetchPort
+from discover.find_links_for_pnics import FindLinksForPnics
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scanner import Scanner
+
+
+class EventSubnetAdd(EventBase):
+
+ def add_port_document(self, env, port_id, network_name=None, project_name=''):
+ # when adding a router-interface port, network_name should be given to improve efficiency.
+ # when adding a gateway port, project_name must be specified, because this type of port
+ # document does not have a project attribute. In this case, network_name should not be provided.
+
+ fetcher = ApiFetchPort()
+ fetcher.set_env(env)
+ ports = fetcher.get(port_id)
+
+ if ports:
+ port = ports[0]
+ project_id = port['tenant_id']
+ network_id = port['network_id']
+
+ if not network_name:
+ network = self.inv.get_by_id(env, network_id)
+ network_name = network['name']
+
+ port['type'] = "port"
+ port['environment'] = env
+ port_id = port['id']
+ port['id_path'] = "%s/%s-projects/%s/%s-networks/%s/%s-ports/%s" % \
+ (env, env, project_id, project_id, network_id, network_id, port_id)
+ port['last_scanned'] = datetime.datetime.utcnow()
+ if 'project' in port:
+ project_name = port['project']
+ port['name_path'] = "/%s/Projects/%s/Networks/%s/Ports/%s" % \
+ (env, project_name, network_name, port_id)
+ self.inv.set(port)
+ self.log.info("add port document for port:%s" % port_id)
+ return port
+ return False
+
+ def add_ports_folder(self, env, project_id, network_id, network_name):
+ port_folder = {
+ "id": network_id + "-ports",
+ "create_object": True,
+ "name": "Ports",
+ "text": "Ports",
+ "type": "ports_folder",
+ "parent_id": network_id,
+ "parent_type": "network",
+ 'environment': env,
+ 'id_path': "%s/%s-projects/%s/%s-networks/%s/%s-ports/" % (env, env, project_id, project_id,
+ network_id, network_id),
+ 'name_path': "/%s/Projects/%s/Networks/%s/Ports" % (env, project_id, network_name),
+ "show_in_tree": True,
+ "last_scanned": datetime.datetime.utcnow(),
+ "object_name": "Ports",
+ }
+
+ self.inv.set(port_folder)
+
+ def add_children_documents(self, env, project_id, network_id, network_name, host_id):
+ # generate port folder data.
+ self.add_ports_folder(env, project_id, network_id, network_name)
+
+ # get ports ID.
+ port_id = DbFetchPort().get_id(network_id)
+
+ # add specific ports documents.
+ self.add_port_document(env, port_id, network_name=network_name)
+
+ port_handler = EventPortAdd()
+
+ # add network_services_folder document.
+ port_handler.add_network_services_folder(env, project_id, network_id, network_name)
+
+ # add dhcp vservice document.
+ host = self.inv.get_by_id(env, host_id)
+
+ port_handler.add_dhcp_document(env, host, network_id, network_name)
+
+ # add vnics folder.
+ port_handler.add_vnics_folder(env, host, network_id, network_name)
+
+ # add vnic document.
+ port_handler.add_vnic_document(env, host, network_id, network_name)
+
+ def handle(self, env, notification):
+ # check for network document.
+ subnet = notification['payload']['subnet']
+ project_id = subnet['tenant_id']
+ network_id = subnet['network_id']
+ if 'id' not in subnet:
+ self.log.info('Subnet payload doesn\'t have id, aborting subnet add')
+ return EventResult(result=False, retry=False)
+
+ network_document = self.inv.get_by_id(env, network_id)
+ if not network_document:
+ self.log.info('network document does not exist, aborting subnet add')
+ return EventResult(result=False, retry=True)
+ network_name = network_document['name']
+
+ # build subnet document for adding network
+ if subnet['cidr'] not in network_document['cidrs']:
+ network_document['cidrs'].append(subnet['cidr'])
+ if not network_document.get('subnets'):
+ network_document['subnets'] = {}
+
+ network_document['subnets'][subnet['name']] = subnet
+ if subnet['id'] not in network_document['subnet_ids']:
+ network_document['subnet_ids'].append(subnet['id'])
+ self.inv.set(network_document)
+
+ # Check DHCP enable, if true, scan network.
+ if subnet['enable_dhcp'] is True:
+ # update network
+ # TODO: #AskCheng - why is this necessary?
+ if len(ApiAccess.regions) == 0:
+ fetcher = ApiFetchRegions()
+ fetcher.set_env(env)
+ fetcher.get(None)
+
+ self.log.info("add new subnet.")
+ host_id = notification["publisher_id"].replace("network.", "", 1)
+ self.add_children_documents(env, project_id, network_id, network_name, host_id)
+
+ # scan links and cliques
+ self.log.info("scanning for links")
+ FindLinksForPnics().add_links()
+ FindLinksForVserviceVnics().add_links(search={"parent_id": "qdhcp-%s-vnics" % network_id})
+
+ scanner = Scanner()
+ scanner.set_env(env)
+ scanner.scan_cliques()
+ self.log.info("Finished subnet added.")
+ return EventResult(result=True,
+ related_object=subnet['id'],
+ display_context=network_id)
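Only a few payload fields drive this flow. A minimal illustrative notification (key names follow the code; values are invented):

    notification = {
        "publisher_id": "network.node-1.example.com",   # host id is derived from this
        "payload": {
            "subnet": {
                "id": "s-1",
                "name": "subnet-1",
                "cidr": "10.0.0.0/24",
                "network_id": "n-1",
                "tenant_id": "t-1",
                "enable_dhcp": True,   # True triggers the children-document branch
            }
        }
    }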
diff --git a/app/discover/events/event_subnet_delete.py b/app/discover/events/event_subnet_delete.py
new file mode 100644
index 0000000..900e701
--- /dev/null
+++ b/app/discover/events/event_subnet_delete.py
@@ -0,0 +1,57 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventResult
+from discover.events.event_delete_base import EventDeleteBase
+
+
+class EventSubnetDelete(EventDeleteBase):
+
+ def delete_children_documents(self, env, vservice_id):
+ vnic_parent_id = vservice_id + '-vnics'
+ vnic = self.inv.get_by_field(env, 'vnic', 'parent_id', vnic_parent_id, get_single=True)
+ if not vnic:
+ self.log.info("Vnic document not found.")
+ return EventResult(result=False, retry=False)
+
+ # delete port and vnic together by mac address.
+ self.inv.delete('inventory', {"mac_address": vnic.get("mac_address")})
+ return self.delete_handler(env, vservice_id, 'vservice')
+
+ def handle(self, env, notification):
+ subnet_id = notification['payload']['subnet_id']
+ network_document = self.inv.get_by_field(env, "network", "subnet_ids", subnet_id, get_single=True)
+ if not network_document:
+ self.log.info("network document not found, aborting subnet deleting")
+ return EventResult(result=False, retry=False)
+
+ # remove subnet_id from subnet_ids array
+ network_document["subnet_ids"].remove(subnet_id)
+
+ # find the subnet in network_document by subnet_id
+ subnet = next(
+ filter(lambda s: s['id'] == subnet_id,
+ network_document['subnets'].values()),
+ None)
+
+ # remove cidr from cidrs and delete subnet document.
+ if subnet:
+ network_document['cidrs'].remove(subnet['cidr'])
+ del network_document['subnets'][subnet['name']]
+
+ self.inv.set(network_document)
+
+ # when the network does not have any subnets, delete the vservice DHCP, port and vnic documents.
+ if not network_document["subnet_ids"]:
+ vservice_dhcp_id = 'qdhcp-{}'.format(network_document['id'])
+ self.delete_children_documents(env, vservice_dhcp_id)
+
+ return EventResult(result=True,
+ related_object=subnet['id'],
+ display_context=network_document.get('id'))
diff --git a/app/discover/events/event_subnet_update.py b/app/discover/events/event_subnet_update.py
new file mode 100644
index 0000000..9d3c48b
--- /dev/null
+++ b/app/discover/events/event_subnet_update.py
@@ -0,0 +1,102 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_port_add import EventPortAdd
+from discover.events.event_port_delete import EventPortDelete
+from discover.events.event_subnet_add import EventSubnetAdd
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.api.api_fetch_regions import ApiFetchRegions
+from discover.fetchers.db.db_fetch_port import DbFetchPort
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scanner import Scanner
+
+
+class EventSubnetUpdate(EventBase):
+
+ def handle(self, env, notification):
+ # check for network document.
+ subnet = notification['payload']['subnet']
+ project = notification['_context_project_name']
+ host_id = notification['publisher_id'].replace('network.', '', 1)
+ subnet_id = subnet['id']
+ network_id = subnet['network_id']
+ network_document = self.inv.get_by_id(env, network_id)
+ if not network_document:
+ self.log.info('network document does not exist, aborting subnet update')
+ return EventResult(result=False, retry=True)
+
+ # update network document.
+ subnets = network_document['subnets']
+ key = next(filter(lambda k: subnets[k]['id'] == subnet_id, subnets),
+ None)
+
+ if key:
+ if subnet['enable_dhcp'] and subnets[key]['enable_dhcp'] is False:
+ # scan DHCP namespace to add related document.
+ # add dhcp vservice document.
+ host = self.inv.get_by_id(env, host_id)
+ port_handler = EventPortAdd()
+ port_handler.add_dhcp_document(env, host, network_id,
+ network_document['name'])
+
+ # make sure that self.regions is not empty.
+ if len(ApiAccess.regions) == 0:
+ fetcher = ApiFetchRegions()
+ fetcher.set_env(env)
+ fetcher.get(None)
+
+ self.log.info("add port binding to DHCP server.")
+ port_id = DbFetchPort(). \
+ get_id_by_field(network_id,
+ """device_owner LIKE "%dhcp" """)
+ port = EventSubnetAdd(). \
+ add_port_document(env, port_id,
+ network_name=network_document['name'],
+ project_name=project)
+ if port:
+ port_handler. \
+ add_vnic_document(env, host, network_id,
+ network_name=network_document['name'],
+ mac_address=port['mac_address'])
+ # add link for vservice - vnic and rescan cliques
+ FindLinksForVserviceVnics().add_links(search={"id": "qdhcp-%s" % network_id})
+ scanner = Scanner()
+ scanner.set_env(env)
+ scanner.scan_cliques()
+
+ if subnet['enable_dhcp'] is False and subnets[key]['enable_dhcp']:
+ # delete existing related DHCP documents.
+ self.inv.delete("inventory",
+ {'id': "qdhcp-%s" % subnet['network_id']})
+ self.log.info("delete DHCP document: qdhcp-%s" %
+ subnet['network_id'])
+
+ port = self.inv.find_items({'network_id': subnet['network_id'],
+ 'device_owner': 'network:dhcp'},
+ get_single=True)
+ if 'id' in port:
+ EventPortDelete().delete_port(env, port['id'])
+ self.log.info("delete port binding to DHCP server.")
+
+ if subnet['name'] == subnets[key]['name']:
+ subnets[key] = subnet
+ else:
+ # TODO: #AskCheng shouldn't we remove the old one?
+ subnets[subnet['name']] = subnet
+
+ self.inv.set(network_document)
+ return EventResult(result=True,
+ related_object=subnet['id'],
+ display_context=network_id)
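The central decision in this handler is the enable_dhcp transition between the stored subnet and the incoming one. A compact standalone sketch of that decision (the helper below is illustrative, not part of the module):

    def dhcp_transition(old_enabled: bool, new_enabled: bool) -> str:
        # mirrors the two branches in EventSubnetUpdate.handle()
        if new_enabled and not old_enabled:
            return "add DHCP vservice, port and vnic documents, then rescan links and cliques"
        if old_enabled and not new_enabled:
            return "delete the qdhcp-<network_id> document and the DHCP port"
        return "no DHCP change; only refresh the stored subnet"

    assert dhcp_transition(False, True).startswith("add DHCP")
    assert dhcp_transition(True, False).startswith("delete")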
diff --git a/app/discover/events/listeners/__init__.py b/app/discover/events/listeners/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/discover/events/listeners/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/discover/events/listeners/default_listener.py b/app/discover/events/listeners/default_listener.py
new file mode 100755
index 0000000..a135673
--- /dev/null
+++ b/app/discover/events/listeners/default_listener.py
@@ -0,0 +1,314 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+import argparse
+import datetime
+import json
+import os
+import time
+from collections import defaultdict
+from typing import List
+
+from kombu import Connection, Queue, Exchange
+from kombu.mixins import ConsumerMixin
+
+from discover.configuration import Configuration
+from discover.event_handler import EventHandler
+from discover.events.event_base import EventResult
+from discover.events.event_metadata_parser import parse_metadata_file
+from discover.events.listeners.listener_base import ListenerBase
+from messages.message import Message
+from monitoring.setup.monitoring_setup_manager import MonitoringSetupManager
+from utils.constants import OperationalStatus, EnvironmentFeatures
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+from utils.mongo_access import MongoAccess
+from utils.string_utils import stringify_datetime
+from utils.util import SignalHandler, setup_args
+
+
+class DefaultListener(ListenerBase, ConsumerMixin):
+
+ SOURCE_SYSTEM = "OpenStack"
+
+ COMMON_METADATA_FILE = "events.json"
+
+ DEFAULTS = {
+ "env": "Mirantis-Liberty",
+ "mongo_config": "",
+ "metadata_file": "",
+ "inventory": "inventory",
+ "loglevel": "INFO",
+ "environments_collection": "environments_config",
+ "retry_limit": 10,
+ "consume_all": False
+ }
+
+ def __init__(self, connection: Connection,
+ event_handler: EventHandler,
+ event_queues: List,
+ env_name: str = DEFAULTS["env"],
+ inventory_collection: str = DEFAULTS["inventory"],
+ retry_limit: int = DEFAULTS["retry_limit"],
+ consume_all: bool = DEFAULTS["consume_all"]):
+ super().__init__()
+
+ self.connection = connection
+ self.retry_limit = retry_limit
+ self.env_name = env_name
+ self.consume_all = consume_all
+ self.handler = event_handler
+ self.event_queues = event_queues
+ self.failing_messages = defaultdict(int)
+
+ self.inv = InventoryMgr()
+ self.inv.set_collections(inventory_collection)
+ if self.inv.is_feature_supported(self.env_name, EnvironmentFeatures.MONITORING):
+ self.inv.monitoring_setup_manager = \
+ MonitoringSetupManager(self.env_name)
+ self.inv.monitoring_setup_manager.server_setup()
+
+ def get_consumers(self, consumer, channel):
+ return [consumer(queues=self.event_queues,
+ accept=['json'],
+ callbacks=[self.process_task])]
+
+ # Determines if message should be processed by a handler
+ # and extracts message body if yes.
+ @staticmethod
+ def _extract_event_data(body):
+ if "event_type" in body:
+ return True, body
+ elif "event_type" in body.get("oslo.message", ""):
+ return True, json.loads(body["oslo.message"])
+ else:
+ return False, None
+
+ def process_task(self, body, message):
+ received_timestamp = stringify_datetime(datetime.datetime.now())
+ processable, event_data = self._extract_event_data(body)
+ # If env listener can't process the message
+ # or it's not intended for env listener to handle,
+ # leave the message in the queue unless "consume_all" flag is set
+ if processable and event_data["event_type"] in self.handler.handlers:
+ with open("/tmp/listener.log", "a") as f:
+ f.write("{}\n".format(event_data))
+ event_result = self.handle_event(event_data["event_type"],
+ event_data)
+ finished_timestamp = stringify_datetime(datetime.datetime.now())
+ self.save_message(message_body=event_data,
+ result=event_result,
+ started=received_timestamp,
+ finished=finished_timestamp)
+
+ # Check whether the event was fully handled
+ # and, if not, whether it should be retried later
+ if event_result.result:
+ message.ack()
+ elif event_result.retry:
+ if 'message_id' not in event_data:
+ message.reject()
+ else:
+ # Track message retry count
+ message_id = event_data['message_id']
+ self.failing_messages[message_id] += 1
+
+ # Retry handling the message
+ if self.failing_messages[message_id] <= self.retry_limit:
+ self.inv.log.info("Retrying handling message " +
+ "with id '{}'".format(message_id))
+ message.requeue()
+ # Discard the message if it's not accepted
+ # after specified number of trials
+ else:
+ self.inv.log.warn("Discarding message with id '{}' ".
+ format(message_id) +
+ "as it's exceeded the retry limit")
+ message.reject()
+ del self.failing_messages[message_id]
+ else:
+ message.reject()
+ elif self.consume_all:
+ message.reject()
+
+ # This method passes the event to its handler.
+ # Returns an EventResult whose 'result' flag is True if the handler
+ # finished successfully, False otherwise.
+ # The 'retry' flag specifies whether the error is recoverable or not;
+ # it is checked only if 'result' is False.
+ def handle_event(self, event_type: str, notification: dict) -> EventResult:
+ print("Got notification.\nEvent_type: {}\nNotification:\n{}".
+ format(event_type, notification))
+ try:
+ result = self.handler.handle(event_name=event_type,
+ notification=notification)
+ return result if result else EventResult(result=False, retry=False)
+ except Exception as e:
+ self.inv.log.exception(e)
+ return EventResult(result=False, retry=False)
+
+ def save_message(self, message_body: dict, result: EventResult,
+ started: str, finished: str):
+ try:
+ message = Message(
+ msg_id=message_body.get('message_id'),
+ env=self.env_name,
+ source=self.SOURCE_SYSTEM,
+ object_id=result.related_object,
+ display_context=result.display_context,
+ level=message_body.get('priority'),
+ msg=message_body,
+ ts=message_body.get('timestamp'),
+ received_ts=started,
+ finished_ts=finished
+ )
+ self.inv.collections['messages'].insert_one(message.get())
+ return True
+ except Exception as e:
+ self.inv.log.error("Failed to save message")
+ self.inv.log.exception(e)
+ return False
+
+ @staticmethod
+ def listen(args: dict = None):
+
+ args = setup_args(args, DefaultListener.DEFAULTS, get_args)
+ if 'process_vars' not in args:
+ args['process_vars'] = {}
+
+ env_name = args["env"]
+ inventory_collection = args["inventory"]
+
+ MongoAccess.set_config_file(args["mongo_config"])
+ conf = Configuration(args["environments_collection"])
+ conf.use_env(env_name)
+
+ event_handler = EventHandler(env_name, inventory_collection)
+ event_queues = []
+
+ env_config = conf.get_env_config()
+ common_metadata_file = os.path.join(env_config.get('app_path', '/etc/calipso'),
+ 'config',
+ DefaultListener.COMMON_METADATA_FILE)
+
+ # import common metadata
+ import_metadata(event_handler, event_queues, common_metadata_file)
+
+ # import custom metadata if supplied
+ if args["metadata_file"]:
+ import_metadata(event_handler, event_queues, args["metadata_file"])
+
+ inv = InventoryMgr()
+ inv.set_collections(inventory_collection)
+ logger = FullLogger()
+ logger.set_loglevel(args["loglevel"])
+
+ amqp_config = conf.get("AMQP")
+ connect_url = 'amqp://{user}:{pwd}@{host}:{port}//' \
+ .format(user=amqp_config["user"],
+ pwd=amqp_config["password"],
+ host=amqp_config["host"],
+ port=amqp_config["port"])
+
+ with Connection(connect_url) as conn:
+ try:
+ print(conn)
+ conn.connect()
+ args['process_vars']['operational'] = OperationalStatus.RUNNING
+ terminator = SignalHandler()
+ worker = \
+ DefaultListener(connection=conn,
+ event_handler=event_handler,
+ event_queues=event_queues,
+ retry_limit=args["retry_limit"],
+ consume_all=args["consume_all"],
+ inventory_collection=inventory_collection,
+ env_name=env_name)
+ worker.run()
+ if terminator.terminated:
+ args.get('process_vars', {})['operational'] = \
+ OperationalStatus.STOPPED
+ except KeyboardInterrupt:
+ print('Stopped')
+ args['process_vars']['operational'] = OperationalStatus.STOPPED
+ except Exception as e:
+ logger.log.exception(e)
+ args['process_vars']['operational'] = OperationalStatus.ERROR
+ finally:
+ # This should enable safe saving of shared variables
+ time.sleep(0.1)
+
+
+def get_args():
+ # Read listener config from command line args
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
+ default=DefaultListener.DEFAULTS["mongo_config"],
+ help="Name of config file with MongoDB access details")
+ parser.add_argument("--metadata_file", nargs="?", type=str,
+ default=DefaultListener.DEFAULTS["metadata_file"],
+ help="Name of custom configuration metadata file")
+ def_env_collection = DefaultListener.DEFAULTS["environments_collection"]
+ parser.add_argument("-c", "--environments_collection", nargs="?", type=str,
+ default=def_env_collection,
+ help="Name of collection where selected environment " +
+ "is taken from \n(default: {})"
+ .format(def_env_collection))
+ parser.add_argument("-e", "--env", nargs="?", type=str,
+ default=DefaultListener.DEFAULTS["env"],
+ help="Name of target listener environment \n" +
+ "(default: {})"
+ .format(DefaultListener.DEFAULTS["env"]))
+ parser.add_argument("-y", "--inventory", nargs="?", type=str,
+ default=DefaultListener.DEFAULTS["inventory"],
+ help="Name of inventory collection \n"" +"
+ "(default: '{}')"
+ .format(DefaultListener.DEFAULTS["inventory"]))
+ parser.add_argument("-l", "--loglevel", nargs="?", type=str,
+ default=DefaultListener.DEFAULTS["loglevel"],
+ help="Logging level \n(default: '{}')"
+ .format(DefaultListener.DEFAULTS["loglevel"]))
+ parser.add_argument("-r", "--retry_limit", nargs="?", type=int,
+ default=DefaultListener.DEFAULTS["retry_limit"],
+ help="Maximum number of times the OpenStack message "
+ "should be requeued before being discarded \n" +
+ "(default: {})"
+ .format(DefaultListener.DEFAULTS["retry_limit"]))
+ parser.add_argument("--consume_all", action="store_true",
+ help="If this flag is set, " +
+ "environment listener will try to consume"
+ "all messages from OpenStack event queue "
+ "and reject incompatible messages."
+ "Otherwise they'll just be ignored.",
+ default=DefaultListener.DEFAULTS["consume_all"])
+ args = parser.parse_args()
+ return args
+
+
+# Imports metadata from file,
+# updates event handler with new handlers
+# and event queues with new queues
+def import_metadata(event_handler: EventHandler,
+ event_queues: List[Queue],
+ metadata_file_path: str) -> None:
+ handlers_package, queues, event_handlers = \
+ parse_metadata_file(metadata_file_path)
+ event_handler.discover_handlers(handlers_package, event_handlers)
+ event_queues.extend([
+ Queue(q['queue'],
+ Exchange(q['exchange'], 'topic', durable=False),
+ durable=False, routing_key='#') for q in queues
+ ])
+
+
+if __name__ == '__main__':
+ DefaultListener.listen()
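Beyond the command-line entry point, listen() can also be fed an args dict directly, assuming setup_args() merges the supplied dict with DEFAULTS as its use above suggests. A hedged sketch (file names and environment name are placeholders; a reachable MongoDB and AMQP broker are required):

    from discover.events.listeners.default_listener import DefaultListener

    DefaultListener.listen({
        "env": "Mirantis-Liberty",        # must exist in the environments collection
        "mongo_config": "mongo.conf",     # placeholder config file name
        "inventory": "inventory",
        "loglevel": "INFO",
        "retry_limit": 10,
        "consume_all": False,
    })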
diff --git a/app/discover/events/listeners/listener_base.py b/app/discover/events/listeners/listener_base.py
new file mode 100644
index 0000000..7052dc9
--- /dev/null
+++ b/app/discover/events/listeners/listener_base.py
@@ -0,0 +1,18 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from abc import ABC, abstractmethod
+
+
+class ListenerBase(ABC):
+
+ @staticmethod
+ @abstractmethod
+ def listen():
+ pass
diff --git a/app/discover/fetch_host_object_types.py b/app/discover/fetch_host_object_types.py
new file mode 100644
index 0000000..da38af7
--- /dev/null
+++ b/app/discover/fetch_host_object_types.py
@@ -0,0 +1,37 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetcher import Fetcher
+
+
+class FetchHostObjectTypes(Fetcher):
+
+ def get(self, parent):
+ ret = {
+ "id": "",
+ "parent": parent,
+ "rows": [
+ {
+ "id": "instances_root",
+ "type": "instances_folder",
+ "text": "Instances"
+ },
+ {
+ "id": "networks_root",
+ "type": "networks_folder",
+ "text": "Networks"
+ },
+ {
+ "id": "vservices_root",
+ "type": "vservices_folder",
+ "text": "vServices"
+ }
+ ]
+ }
+ return ret
diff --git a/app/discover/fetch_region_object_types.py b/app/discover/fetch_region_object_types.py
new file mode 100644
index 0000000..047c84c
--- /dev/null
+++ b/app/discover/fetch_region_object_types.py
@@ -0,0 +1,37 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetcher import Fetcher
+
+
+class FetchRegionObjectTypes(Fetcher):
+
+ def get(self, parent):
+ ret = {
+ "id": "",
+ "parent": parent,
+ "rows": [
+ {
+ "id": "aggregates_root",
+ "type": "aggregates_folder",
+ "text": "Aggregates"
+ },
+ {
+ "id": "availability_zones_root",
+ "type": "availability_zones_folder",
+ "text": "Availability Zones"
+ },
+ {
+ "id": "network_agents_root",
+ "type": "network_agents_folder",
+ "text": "network Agents"
+ }
+ ]
+ }
+ return ret
diff --git a/app/discover/fetcher.py b/app/discover/fetcher.py
new file mode 100644
index 0000000..8d7fdbb
--- /dev/null
+++ b/app/discover/fetcher.py
@@ -0,0 +1,35 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.configuration import Configuration
+from utils.logging.full_logger import FullLogger
+
+
+class Fetcher:
+
+ def __init__(self):
+ super().__init__()
+ self.env = None
+ self.log = FullLogger()
+ self.configuration = None
+
+ @staticmethod
+ def escape(string):
+ return string
+
+ def set_env(self, env):
+ self.env = env
+ self.log.set_env(env)
+ self.configuration = Configuration()
+
+ def get_env(self):
+ return self.env
+
+ def get(self, object_id):
+ return None
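Concrete fetchers extend this base class and override get(). A hedged sketch of a subclass (the class and its return value are invented for illustration; set_env() assumes a configured MongoDB connection because it creates a Configuration()):

    from discover.fetcher import Fetcher

    class ExampleFetcher(Fetcher):              # hypothetical fetcher, not part of the patch
        def get(self, object_id):
            # self.env is populated by set_env(); the return shape is illustrative
            return [{"id": object_id, "environment": self.get_env()}]

    fetcher = ExampleFetcher()
    fetcher.set_env("Mirantis-Liberty")         # placeholder environment name
    print(fetcher.get("example-object"))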
diff --git a/app/discover/fetcher_new.py b/app/discover/fetcher_new.py
new file mode 100644
index 0000000..f545554
--- /dev/null
+++ b/app/discover/fetcher_new.py
@@ -0,0 +1,30 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetcher import Fetcher
+
+
+# old stuff
+class FetchHostObjectTypes(Fetcher):
+
+ def get(self, parent):
+ ret = {
+ "type": "host_object_type",
+ "id": "",
+ "parent": parent,
+ "rows": [
+ {"id": "instances_root", "text": "Instances", "descendants": 1},
+ {"id": "networks_root", "text": "Networks", "descendants": 1},
+ {"id": "pnics_root", "text": "pNICs", "descendants": 1},
+ {"id": "vservices_root", "text": "vServices", "descendants": 1}
+ ]
+ }
+ return ret
+
+ ## old/moved
+
diff --git a/app/discover/fetchers/__init__.py b/app/discover/fetchers/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/aci/__init__.py b/app/discover/fetchers/aci/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/aci/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/aci/aci_access.py b/app/discover/fetchers/aci/aci_access.py
new file mode 100644
index 0000000..836e45d
--- /dev/null
+++ b/app/discover/fetchers/aci/aci_access.py
@@ -0,0 +1,200 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+import requests
+
+from discover.configuration import Configuration
+from discover.fetcher import Fetcher
+
+
+def aci_config_required(default=None):
+ def decorator(func):
+ def wrapper(self, *args, **kwargs):
+ if not self.aci_enabled:
+ return default
+ return func(self, *args, **kwargs)
+ return wrapper
+ return decorator
+
+
+class AciAccess(Fetcher):
+
+ RESPONSE_FORMAT = "json"
+ cookie_token = None
+
+ def __init__(self):
+ super().__init__()
+ self.configuration = Configuration()
+ self.aci_enabled = self.configuration.get_env_config() \
+ .get('aci_enabled', False)
+ self.aci_configuration = None
+ self.host = None
+ if self.aci_enabled:
+ self.aci_configuration = self.configuration.get("ACI")
+ self.host = self.aci_configuration["host"]
+
+ def get_base_url(self):
+ return "https://{}/api".format(self.host)
+
+ # Unwrap ACI response payload
+ # and return an array of desired fields' values.
+ #
+ # Parameters
+ # ----------
+ #
+ # payload: dict
+ # Full json response payload returned by ACI
+ # *field_names: Tuple[str]
+ # Enumeration of fields that are used to traverse ACI "imdata" array
+ # (order is important)
+ #
+ # Returns
+ # ----------
+ # list
+ # List of unwrapped dictionaries (or primitives)
+ #
+ # Example
+ # ----------
+ # Given payload:
+ #
+ # {
+ # "totalCount": "2",
+ # "imdata": [
+ # {
+ # "aaa": {
+ # "bbb": {
+ # "ccc": "value1"
+ # }
+ # }
+ # },
+ # {
+ # "aaa": {
+ # "bbb": {
+ # "ccc": "value2"
+ # }
+ # }
+ # }
+ # ]
+ # }
+ #
+ # Executing get_objects_by_field_names(payload, "aaa", "bbb")
+ # will yield the following result:
+ #
+ # >>> [{"ccc": "value1"}, {"ccc": "value2"}]
+ #
+ # Executing get_objects_by_field_names(payload, "aaa", "bbb", "ccc")
+ # will yield the following result:
+ #
+ # >>> ["value1", "value2"]
+ #
+ @staticmethod
+ def get_objects_by_field_names(payload, *field_names):
+ results = payload.get("imdata", [])
+ if not results:
+ return []
+
+ for field in field_names:
+ results = [entry[field] for entry in results]
+ return results
+
+ # Set auth tokens in request headers and cookies
+ @staticmethod
+ def _insert_token_into_request(cookies):
+ return dict(cookies, **AciAccess.cookie_token) \
+ if cookies \
+ else AciAccess.cookie_token
+
+ @staticmethod
+ def _set_token(response):
+ tokens = AciAccess.get_objects_by_field_names(response.json(), "aaaLogin", "attributes", "token")
+ token = tokens[0]
+
+ AciAccess.cookie_token = {"APIC-Cookie": token}
+
+ @aci_config_required()
+ def login(self):
+ url = "/".join((self.get_base_url(), "aaaLogin.json"))
+ payload = {
+ "aaaUser": {
+ "attributes": {
+ "name": self.aci_configuration["user"],
+ "pwd": self.aci_configuration["pwd"]
+ }
+ }
+ }
+
+ response = requests.post(url, json=payload, verify=False)
+ response.raise_for_status()
+
+ AciAccess._set_token(response)
+
+ # Refresh token or login if token has expired
+ @aci_config_required()
+ def refresh_token(self):
+ # First time login
+ if not AciAccess.cookie_token:
+ self.login()
+ return
+
+ url = "/".join((self.get_base_url(), "aaaRefresh.json"))
+
+ response = requests.get(url, verify=False)
+
+ # Login again if the token has expired
+ if response.status_code == requests.codes.forbidden:
+ self.login()
+ return
+ # Propagate any other error
+ elif response.status_code != requests.codes.ok:
+ response.raise_for_status()
+
+ AciAccess._set_token(response)
+
+ @aci_config_required(default={})
+ def send_get(self, url, params, headers, cookies):
+ self.refresh_token()
+
+ cookies = self._insert_token_into_request(cookies)
+
+ response = requests.get(url, params=params, headers=headers,
+ cookies=cookies, verify=False)
+ # Let client handle HTTP errors
+ response.raise_for_status()
+
+ return response.json()
+
+ # Search ACI for Managed Objects (MOs) of a specific class
+ @aci_config_required(default=[])
+ def fetch_objects_by_class(self,
+ class_name: str,
+ params: dict = None,
+ headers: dict = None,
+ cookies: dict = None,
+ response_format: str = RESPONSE_FORMAT):
+ url = "/".join((self.get_base_url(),
+ "class", "{cn}.{f}".format(cn=class_name, f=response_format)))
+
+ response_json = self.send_get(url, params, headers, cookies)
+ return self.get_objects_by_field_names(response_json, class_name)
+
+ # Fetch data for a specific Managed Object (MO)
+ @aci_config_required(default=[])
+ def fetch_mo_data(self,
+ dn: str,
+ params: dict = None,
+ headers: dict = None,
+ cookies: dict = None,
+ response_format: str = RESPONSE_FORMAT):
+ url = "/".join((self.get_base_url(), "mo", "topology",
+ "{dn}.{f}".format(dn=dn, f=response_format)))
+
+ response_json = self.send_get(url, params, headers, cookies)
+ return response_json
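A hedged usage sketch of the class-level query helper, mirroring the epmMacEp query used by the switch-pnic fetcher below (the MAC address is a placeholder; a reachable APIC and aci_enabled in the environment config are assumed):

    from discover.fetchers.aci.aci_access import AciAccess

    aci = AciAccess()                 # fetch calls return [] when ACI is disabled
    endpoints = aci.fetch_objects_by_class(
        "epmMacEp",
        params={"query-target-filter": 'eq(epmMacEp.addr,"fa:16:3e:00:00:01")'})
    for ep in endpoints:
        print(ep["attributes"]["ifId"])   # entries are the per-class objects with their attributes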
diff --git a/app/discover/fetchers/aci/aci_fetch_switch_pnic.py b/app/discover/fetchers/aci/aci_fetch_switch_pnic.py
new file mode 100644
index 0000000..a4216ea
--- /dev/null
+++ b/app/discover/fetchers/aci/aci_fetch_switch_pnic.py
@@ -0,0 +1,91 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.aci.aci_access import AciAccess, aci_config_required
+from utils.inventory_mgr import InventoryMgr
+from utils.util import encode_aci_dn, get_object_path_part
+
+
+class AciFetchSwitchPnic(AciAccess):
+
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ def fetch_pnics_by_mac_address(self, mac_address):
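+        # APIC query-target-filter: match the exact MAC address and keep
+        # only physical ("eth...") interfaces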
+ mac_filter = "eq(epmMacEp.addr,\"{}\")".format(mac_address)
+ pnic_filter = "wcard(epmMacEp.ifId, \"eth\")"
+ query_filter = "and({},{})".format(mac_filter, pnic_filter)
+
+ pnics = self.fetch_objects_by_class("epmMacEp",
+ {"query-target-filter": query_filter})
+
+ return [pnic["attributes"] for pnic in pnics]
+
+ def fetch_switch_by_id(self, switch_id):
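+        # switch system data (topSystem) is found under the "<switch dn>/sys" MO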
+ dn = "/".join((switch_id, "sys"))
+ response = self.fetch_mo_data(dn)
+        switch_data = self.get_objects_by_field_names(response, "topSystem",
+                                                      "attributes")
+ return switch_data[0] if switch_data else None
+
+ @aci_config_required(default=[])
+ def get(self, pnic_id):
+ environment = self.get_env()
+ pnic = self.inv.get_by_id(environment=environment, item_id=pnic_id)
+ if not pnic:
+ return []
+ mac_address = pnic.get("mac_address")
+ if not mac_address:
+ return []
+
+ switch_pnics = self.fetch_pnics_by_mac_address(mac_address)
+ if not switch_pnics:
+ return []
+ switch_pnic = switch_pnics[0]
+
+ # Prepare and save switch data in inventory
+ aci_id_match = re.match("topology/(.+)/sys", switch_pnic["dn"])
+ if not aci_id_match:
+ raise ValueError("Failed to fetch switch id from pnic dn: {}"
+ .format(switch_pnic["dn"]))
+
+ aci_switch_id = aci_id_match.group(1)
+ db_switch_id = encode_aci_dn(aci_switch_id)
+ if not self.inv.get_by_id(environment, db_switch_id):
+ switch_data = self.fetch_switch_by_id(aci_switch_id)
+ if not switch_data:
+ self.log.warning("No switch found for switch pnic dn: {}"
+ .format(switch_pnic["dn"]))
+ return []
+
+ switch_json = {
+ "id": db_switch_id,
+ "ip_address": switch_data["address"],
+ "type": "switch",
+ "aci_document": switch_data
+ }
+ # Region name is the same as region id
+ region_id = get_object_path_part(pnic["name_path"], "Regions")
+ region = self.inv.get_by_id(environment, region_id)
+            self.inv.save_inventory_object(o=switch_json, parent=region,
+                                           environment=environment)
+
+ db_pnic_id = "-".join((db_switch_id,
+ encode_aci_dn(switch_pnic["ifId"]),
+ mac_address))
+ pnic_json = {
+ "id": db_pnic_id,
+ "type": "pnic",
+ "pnic_type": "switch",
+ "mac_address": mac_address,
+ "aci_document": switch_pnic
+ }
+ return [pnic_json]
+
diff --git a/app/discover/fetchers/api/__init__.py b/app/discover/fetchers/api/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/api/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/api/api_access.py b/app/discover/fetchers/api/api_access.py
new file mode 100644
index 0000000..89eeb34
--- /dev/null
+++ b/app/discover/fetchers/api/api_access.py
@@ -0,0 +1,195 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import calendar
+import re
+import requests
+import time
+
+from discover.configuration import Configuration
+from discover.fetcher import Fetcher
+from utils.string_utils import jsonify
+
+
+class ApiAccess(Fetcher):
+ subject_token = None
+ initialized = False
+ regions = {}
+ config = None
+ api_config = None
+
+ host = ""
+ base_url = ""
+ admin_token = ""
+ tokens = {}
+ admin_endpoint = ""
+ admin_project = None
+ auth_response = None
+
+ alternative_services = {
+ "neutron": ["quantum"]
+ }
+
+    # identity API v2 version with admin token
+ def __init__(self):
+ super(ApiAccess, self).__init__()
+ if ApiAccess.initialized:
+ return
+ ApiAccess.config = Configuration()
+ ApiAccess.api_config = ApiAccess.config.get("OpenStack")
+ host = ApiAccess.api_config["host"]
+ ApiAccess.host = host
+ port = ApiAccess.api_config["port"]
+ if not (host and port):
+ raise ValueError('Missing definition of host or port ' +
+ 'for OpenStack API access')
+ ApiAccess.base_url = "http://" + host + ":" + port
+ ApiAccess.admin_token = ApiAccess.api_config["admin_token"]
+ ApiAccess.admin_project = ApiAccess.api_config["admin_project"] \
+ if "admin_project" in ApiAccess.api_config \
+ else 'admin'
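+        # keystone admin API endpoint (default admin port 35357)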
+ ApiAccess.admin_endpoint = "http://" + host + ":" + "35357"
+
+ token = self.v2_auth_pwd(ApiAccess.admin_project)
+ if not token:
+ raise ValueError("Authentication failed. Failed to obtain token")
+ else:
+ self.subject_token = token
+
+ @staticmethod
+ def parse_time(time_str):
+ try:
+ time_struct = time.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
+ except ValueError:
+ try:
+ time_struct = time.strptime(time_str,
+ "%Y-%m-%dT%H:%M:%S.%fZ")
+ except ValueError:
+ return None
+ return time_struct
+
+ # try to use existing token, if it did not expire
+ def get_existing_token(self, project_id):
+ try:
+ token_details = ApiAccess.tokens[project_id]
+ except KeyError:
+ return None
+ token_expiry = token_details["expires"]
+ token_expiry_time_struct = self.parse_time(token_expiry)
+ if not token_expiry_time_struct:
+ return None
+ token_expiry_time = token_details["token_expiry_time"]
+ now = time.time()
+ if now > token_expiry_time:
+ # token has expired
+ ApiAccess.tokens.pop(project_id)
+ return None
+ return token_details
+
+ def v2_auth(self, project_id, headers, post_body):
+ subject_token = self.get_existing_token(project_id)
+ if subject_token:
+ return subject_token
+ req_url = ApiAccess.base_url + "/v2.0/tokens"
+ response = requests.post(req_url, json=post_body, headers=headers)
+ ApiAccess.auth_response = response.json()
+ if 'error' in self.auth_response:
+ e = self.auth_response['error']
+ self.log.error(str(e['code']) + ' ' + e['title'] + ': ' +
+ e['message'] + ", URL: " + req_url)
+ return None
+ try:
+ token_details = ApiAccess.auth_response["access"]["token"]
+ except KeyError:
+ # assume authentication failed
+ return None
+ token_expiry = token_details["expires"]
+ token_expiry_time_struct = self.parse_time(token_expiry)
+ if not token_expiry_time_struct:
+ return None
+ token_expiry_time = calendar.timegm(token_expiry_time_struct)
+ token_details["token_expiry_time"] = token_expiry_time
+ ApiAccess.tokens[project_id] = token_details
+ return token_details
+
+ def v2_auth_pwd(self, project):
+ user = ApiAccess.api_config["user"]
+ pwd = ApiAccess.api_config["pwd"]
+ post_body = {
+ "auth": {
+ "passwordCredentials": {
+ "username": user,
+ "password": pwd
+ }
+ }
+ }
+ if project is not None:
+ post_body["auth"]["tenantName"] = project
+ project_id = project
+ else:
+ project_id = ""
+ headers = {
+ 'Accept': 'application/json',
+ 'Content-Type': 'application/json; charset=UTF-8'
+ }
+ return self.v2_auth(project_id, headers, post_body)
+
+ def get_rel_url(self, relative_url, headers):
+ req_url = ApiAccess.base_url + relative_url
+ return self.get_url(req_url, headers)
+
+ def get_url(self, req_url, headers):
+ response = requests.get(req_url, headers=headers)
+ if response.status_code != requests.codes.ok:
+ # some error happened
+ if "reason" in response:
+ msg = ", reason: {}".format(response.reason)
+ else:
+ msg = ", response: {}".format(response.text)
+ self.log.error("req_url: {} {}".format(req_url, msg))
+ return response
+ ret = response.json()
+ return ret
+
+ def get_region_url(self, region_name, service):
+ if region_name not in self.regions:
+ return None
+ region = self.regions[region_name]
+ s = self.get_service_region_endpoints(region, service)
+ if not s:
+ return None
+ orig_url = s["adminURL"]
+ # replace host name with the host found in config
+ url = re.sub(r"^([^/]+)//[^:]+", r"\1//" + ApiAccess.host, orig_url)
+ return url
+
+ # like get_region_url(), but remove everything starting from the "/v2"
+ def get_region_url_nover(self, region, service):
+ full_url = self.get_region_url(region, service)
+ if not full_url:
+ self.log.error("could not find region URL for region: " + region)
+ exit()
+ url = re.sub(r":([0-9]+)/v[2-9].*", r":\1", full_url)
+ return url
+
+ def get_catalog(self, pretty):
+ return jsonify(self.regions, pretty)
+
+ # find the endpoints for a given service name,
+ # considering also alternative service names
+ def get_service_region_endpoints(self, region, service):
+ alternatives = [service]
+ endpoints = region["endpoints"]
+ if service in self.alternative_services:
+ alternatives.extend(self.alternative_services[service])
+ for sname in alternatives:
+ if sname in endpoints:
+ return endpoints[sname]
+ return None
+
diff --git a/app/discover/fetchers/api/api_fetch_availability_zones.py b/app/discover/fetchers/api/api_fetch_availability_zones.py
new file mode 100644
index 0000000..196893b
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_availability_zones.py
@@ -0,0 +1,56 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+
+
+class ApiFetchAvailabilityZones(ApiAccess):
+ def __init__(self):
+ super(ApiFetchAvailabilityZones, self).__init__()
+
+ def get(self, project_id):
+ token = self.v2_auth_pwd(project_id)
+ if not token:
+ return []
+ ret = []
+ for region in self.regions:
+ ret.extend(self.get_for_region(project_id, region, token))
+ return ret
+
+ def get_for_region(self, project, region, token):
+ # we use os-availability-zone/detail rather than os-availability-zone,
+        # because the latter does not include the "internal" zone in the results
+ endpoint = self.get_region_url_nover(region, "nova")
+ req_url = endpoint + "/v2/" + token["tenant"]["id"] + \
+ "/os-availability-zone/detail"
+ headers = {
+ "X-Auth-Project-Id": project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if "status" in response and int(response["status"]) != 200:
+ return []
+ ret = []
+ if "availabilityZoneInfo" not in response:
+ return []
+ azs = response["availabilityZoneInfo"]
+ if not azs:
+ return []
+ for doc in azs:
+ doc["id"] = doc["zoneName"]
+ doc["name"] = doc.pop("zoneName")
+ doc["master_parent_type"] = "region"
+ doc["master_parent_id"] = region
+ doc["parent_type"] = "availability_zones_folder"
+ doc["parent_id"] = region + "-availability_zones"
+ doc["parent_text"] = "Availability Zones"
+ doc["available"] = doc["zoneState"]["available"]
+ doc.pop("zoneState")
+ ret.append(doc)
+ return ret
diff --git a/app/discover/fetchers/api/api_fetch_end_points.py b/app/discover/fetchers/api/api_fetch_end_points.py
new file mode 100644
index 0000000..9471c7e
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_end_points.py
@@ -0,0 +1,35 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# fetch the end points for a given project (tenant)
+# return list of regions, to allow further recursive scanning
+
+from discover.fetchers.api.api_access import ApiAccess
+
+
+class ApiFetchEndPoints(ApiAccess):
+
+ def get(self, project_id):
+ if project_id != "admin":
+ return [] # XXX currently having problems authenticating to other tenants
+ self.v2_auth_pwd(project_id)
+
+ environment = ApiAccess.config.get_env_name()
+ regions = []
+ services = ApiAccess.auth_response['access']['serviceCatalog']
+ endpoints = []
+ for s in services:
+ if s["type"] != "identity":
+ continue
+ e = s["endpoints"][0]
+ e["environment"] = environment
+ e["project"] = project_id
+ e["type"] = "endpoint"
+ endpoints.append(e)
+ return endpoints
diff --git a/app/discover/fetchers/api/api_fetch_host_instances.py b/app/discover/fetchers/api/api_fetch_host_instances.py
new file mode 100644
index 0000000..56cffda
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_host_instances.py
@@ -0,0 +1,59 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.db.db_access import DbAccess
+from discover.fetchers.db.db_fetch_instances import DbFetchInstances
+from utils.inventory_mgr import InventoryMgr
+from utils.singleton import Singleton
+
+
+class ApiFetchHostInstances(ApiAccess, DbAccess, metaclass=Singleton):
+ def __init__(self):
+ super(ApiFetchHostInstances, self).__init__()
+ self.inv = InventoryMgr()
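+        # derive the nova API endpoint from the keystone URL
+        # (keystone listens on port 5000, nova-api on port 8774)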
+ self.endpoint = ApiAccess.base_url.replace(":5000", ":8774")
+ self.projects = None
+ self.db_fetcher = DbFetchInstances()
+
+ def get_projects(self):
+ if not self.projects:
+ projects_list = self.inv.get(self.get_env(), "project", None)
+ self.projects = [p["name"] for p in projects_list]
+
+ def get(self, id):
+ self.get_projects()
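+        # the requested id has the form "<host id>-<suffix>";
+        # strip the trailing suffix to get the host id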
+ host_id = id[:id.rindex("-")]
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ if not host or "Compute" not in host.get("host_type", ""):
+ return []
+ instances_found = self.get_instances_from_api(host_id)
+ self.db_fetcher.get_instance_data(instances_found)
+ return instances_found
+
+ def get_instances_from_api(self, host_name):
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ tenant_id = token["tenant"]["id"]
+ req_url = self.endpoint + "/v2/" + tenant_id + \
+ "/os-hypervisors/" + host_name + "/servers"
+ response = self.get_url(req_url, {"X-Auth-Token": token["id"]})
+ ret = []
+ if not "hypervisors" in response:
+ return []
+ if not "servers" in response["hypervisors"][0]:
+ return []
+ for doc in response["hypervisors"][0]["servers"]:
+ doc["id"] = doc["uuid"]
+ doc["host"] = host_name
+ doc["local_name"] = doc.pop("name")
+ ret.append(doc)
+ self.log.info("found %s instances for host: %s", str(len(ret)), host_name)
+ return ret
diff --git a/app/discover/fetchers/api/api_fetch_network.py b/app/discover/fetchers/api/api_fetch_network.py
new file mode 100644
index 0000000..889b8a5
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_network.py
@@ -0,0 +1,76 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class ApiFetchNetwork(ApiAccess):
+ def __init__(self):
+ super(ApiFetchNetwork, self).__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, project_id):
+ # use project admin credentials, to be able to fetch all networks
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ ret = []
+ for region in self.regions:
+            network = self.get_network(region, token, project_id)
+            if network:
+                ret.append(network)
+ return ret
+
+    def get_network(self, region, token, network_id):
+        endpoint = self.get_region_url_nover(region, "neutron")
+
+        # get the target network document
+        req_url = endpoint + "/v2.0/networks/" + network_id
+ headers = {
+ "X-Auth-Project-Id": self.admin_project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if not "network" in response:
+ return []
+ network = response["network"]
+ subnets = network['subnets']
+
+ # get subnets documents.
+ subnets_hash = {}
+ cidrs = []
+ subnet_ids = []
+ for subnet_id in subnets:
+ req_url = endpoint + "/v2.0/subnets/" + subnet_id
+ response = self.get_url(req_url, headers)
+ if "subnet" in response:
+                # create a hash of subnets, to allow easy locating of subnets
+ subnet = response["subnet"]
+ subnets_hash[subnet["name"]] = subnet
+ cidrs.append(subnet["cidr"])
+ subnet_ids.append(subnet["id"])
+
+ network["subnets"] = subnets_hash
+ network["cidrs"] = cidrs
+ network["subnet_ids"] = subnet_ids
+
+ network["master_parent_type"] = "project"
+ network["master_parent_id"] = network["tenant_id"]
+ network["parent_type"] = "networks_folder"
+ network["parent_id"] = network["tenant_id"] + "-networks"
+ network["parent_text"] = "Networks"
+        # set the 'network' attribute for network objects to the network ID,
+ # to allow setting constraint on network when creating network clique
+ network['network'] = network["id"]
+ # get the project name
+ project = self.inv.get_by_id(self.get_env(), network["tenant_id"])
+ if project:
+ network["project"] = project["name"]
+
+ return network
diff --git a/app/discover/fetchers/api/api_fetch_networks.py b/app/discover/fetchers/api/api_fetch_networks.py
new file mode 100644
index 0000000..4b70f65
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_networks.py
@@ -0,0 +1,86 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class ApiFetchNetworks(ApiAccess):
+ def __init__(self):
+ super(ApiFetchNetworks, self).__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, project_id=None):
+ # use project admin credentials, to be able to fetch all networks
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ ret = []
+ for region in self.regions:
+ ret.extend(self.get_networks(region, token))
+ return ret
+
+ def get_networks(self, region, token):
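+        # fetch all networks and all subnets for this region from the
+        # neutron v2.0 API, then embed each network's subnet details in it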
+ endpoint = self.get_region_url_nover(region, "neutron")
+ req_url = endpoint + "/v2.0/networks"
+ headers = {
+ "X-Auth-Project-Id": self.admin_project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if not "networks" in response:
+ return []
+ networks = response["networks"]
+ req_url = endpoint + "/v2.0/subnets"
+ response = self.get_url(req_url, headers)
+ subnets_hash = {}
+ if "subnets" in response:
+            # create a hash of subnets, to allow easy locating of subnets
+ subnets = response["subnets"]
+ for s in subnets:
+ subnets_hash[s["id"]] = s
+ for doc in networks:
+ doc["master_parent_type"] = "project"
+ project_id = doc["tenant_id"]
+ if not project_id:
+ # find project ID of admin project
+ project = self.inv.get_by_field(self.get_env(),
+ "project", "name",
+ self.admin_project,
+ get_single=True)
+                if not project:
+                    self.log.error("failed to find admin project in DB")
+                    continue
+                project_id = project["id"]
+ doc["master_parent_id"] = project_id
+ doc["parent_type"] = "networks_folder"
+ doc["parent_id"] = project_id + "-networks"
+ doc["parent_text"] = "Networks"
+            # set the 'network' attribute for network objects to the network ID,
+ # to allow setting constraint on network when creating network clique
+ doc['network'] = doc["id"]
+ # get the project name
+ project = self.inv.get_by_id(self.get_env(), project_id)
+ if project:
+ doc["project"] = project["name"]
+ subnets_details = {}
+ cidrs = []
+ subnet_ids = []
+ for s in doc["subnets"]:
+ try:
+ subnet = subnets_hash[s]
+ cidrs.append(subnet["cidr"])
+ subnet_ids.append(subnet["id"])
+ subnets_details[subnet["name"]] = subnet
+ except KeyError:
+ pass
+
+ doc["subnets"] = subnets_details
+ doc["cidrs"] = cidrs
+ doc["subnet_ids"] = subnet_ids
+ return networks
diff --git a/app/discover/fetchers/api/api_fetch_port.py b/app/discover/fetchers/api/api_fetch_port.py
new file mode 100644
index 0000000..f8d9eeb
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_port.py
@@ -0,0 +1,60 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class ApiFetchPort(ApiAccess):
+ def __init__(self):
+ super(ApiFetchPort, self).__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, project_id):
+ if not project_id:
+ self.log.info("Get method needs ID parameter")
+ return []
+ # use project admin credentials, to be able to fetch all ports
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ ret = []
+        for region in self.regions:
+            port = self.get_port(region, token, project_id)
+            if port:
+                ret.append(port)
+        if not ret:
+            self.log.info("ApiFetchPort: Port not found.")
+ return ret
+
+ def get_port(self, region, token, id):
+ endpoint = self.get_region_url_nover(region, "neutron")
+ req_url = endpoint + "/v2.0/ports/" + id
+ headers = {
+ "X-Auth-Project-Id": self.admin_project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if not "port" in response:
+ return []
+
+ doc = response["port"]
+ doc["master_parent_type"] = "network"
+ doc["master_parent_id"] = doc["network_id"]
+ doc["parent_type"] = "ports_folder"
+ doc["parent_id"] = doc["network_id"] + "-ports"
+ doc["parent_text"] = "Ports"
+ # get the project name
+ net = self.inv.get_by_id(self.get_env(), doc["network_id"])
+ if net:
+ doc["name"] = doc["mac_address"]
+ else:
+ doc["name"] = doc["id"]
+ project = self.inv.get_by_id(self.get_env(), doc["tenant_id"])
+ if project:
+ doc["project"] = project["name"]
+ return doc
diff --git a/app/discover/fetchers/api/api_fetch_ports.py b/app/discover/fetchers/api/api_fetch_ports.py
new file mode 100644
index 0000000..f4c54a6
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_ports.py
@@ -0,0 +1,55 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class ApiFetchPorts(ApiAccess):
+ def __init__(self):
+ super(ApiFetchPorts, self).__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, project_id):
+ # use project admin credentials, to be able to fetch all ports
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ ret = []
+ for region in self.regions:
+ ret.extend(self.get_ports_for_region(region, token))
+ return ret
+
+ def get_ports_for_region(self, region, token):
+ endpoint = self.get_region_url_nover(region, "neutron")
+ req_url = endpoint + "/v2.0/ports"
+ headers = {
+ "X-Auth-Project-Id": self.admin_project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if not "ports" in response:
+ return []
+ ports = response["ports"]
+ for doc in ports:
+ doc["master_parent_type"] = "network"
+ doc["master_parent_id"] = doc["network_id"]
+ doc["parent_type"] = "ports_folder"
+ doc["parent_id"] = doc["network_id"] + "-ports"
+ doc["parent_text"] = "Ports"
+ # get the project name
+ net = self.inv.get_by_id(self.get_env(), doc["network_id"])
+ if net:
+ doc["name"] = doc["mac_address"]
+ else:
+ doc["name"] = doc["id"]
+ project = self.inv.get_by_id(self.get_env(), doc["tenant_id"])
+ if project:
+ doc["project"] = project["name"]
+ return ports
diff --git a/app/discover/fetchers/api/api_fetch_project_hosts.py b/app/discover/fetchers/api/api_fetch_project_hosts.py
new file mode 100644
index 0000000..7dc262e
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_project_hosts.py
@@ -0,0 +1,144 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.db.db_access import DbAccess
+
+
+class ApiFetchProjectHosts(ApiAccess, DbAccess):
+ def __init__(self):
+ super(ApiFetchProjectHosts, self).__init__()
+
+ def get(self, project_id):
+ if project_id != self.admin_project:
+ # do not scan hosts except under project 'admin'
+ return []
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ ret = []
+ for region in self.regions:
+ ret.extend(self.get_for_region(region, token))
+ return ret
+
+ def get_for_region(self, region, token):
+ endpoint = self.get_region_url(region, "nova")
+ ret = []
+ if not token:
+ return []
+ req_url = endpoint + "/os-availability-zone/detail"
+ headers = {
+ "X-Auth-Project-Id": self.admin_project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if "status" in response and int(response["status"]) != 200:
+ return []
+ az_info = response["availabilityZoneInfo"]
+ hosts = {}
+ for doc in az_info:
+ az_hosts = self.get_hosts_from_az(doc)
+ for h in az_hosts:
+ if h["name"] in hosts:
+ # merge host_type data between AZs
+ existing_entry = hosts[h["name"]]
+ for t in h["host_type"]:
+ self.add_host_type(existing_entry, t, doc['zoneName'])
+ else:
+ hosts[h["name"]] = h
+ ret.append(h)
+ # get os_id for hosts using the os-hypervisors API call
+ req_url = endpoint + "/os-hypervisors"
+ response = self.get_url(req_url, headers)
+ if "status" in response and int(response["status"]) != 200:
+ return ret
+ if "hypervisors" not in response:
+ return ret
+ for h in response["hypervisors"]:
+ hvname = h["hypervisor_hostname"]
+ if '.' in hvname and hvname not in hosts:
+ hostname = hvname[:hvname.index('.')]
+ else:
+ hostname = hvname
+ try:
+ doc = hosts[hostname]
+ except KeyError:
+ # TBD - add error output
+ continue
+ doc["os_id"] = str(h["id"])
+ self.fetch_compute_node_ip_address(doc, hvname)
+ # get more network nodes details
+ self.fetch_network_node_details(ret)
+ return ret
+
+ def get_hosts_from_az(self, az):
+ ret = []
+ for h in az["hosts"]:
+ doc = self.get_host_details(az, h)
+ ret.append(doc)
+ return ret
+
+ def get_host_details(self, az, h):
+ # for hosts we use the name
+ services = az["hosts"][h]
+ doc = {
+ "id": h,
+ "host": h,
+ "name": h,
+ "zone": az["zoneName"],
+ "parent_type": "availability_zone",
+ "parent_id": az["zoneName"],
+ "services": services,
+ "host_type": []
+ }
+ if "nova-conductor" in services:
+ s = services["nova-conductor"]
+ if s["available"] and s["active"]:
+ self.add_host_type(doc, "Controller", az['zoneName'])
+ if "nova-compute" in services:
+ s = services["nova-compute"]
+ if s["available"] and s["active"]:
+ self.add_host_type(doc, "Compute", az['zoneName'])
+ return doc
+
+ # fetch more details of network nodes from neutron.agents table
+ def fetch_network_node_details(self, docs):
+ hosts = {}
+ for doc in docs:
+ hosts[doc["host"]] = doc
+ query = """
+ SELECT DISTINCT host, host AS id, configurations
+ FROM {}.agents
+ WHERE agent_type IN ('Metadata agent', 'DHCP agent', 'L3 agent')
+ """.format(self.neutron_db)
+ results = self.get_objects_list(query, "")
+ for r in results:
+ host = hosts[r["host"]]
+ host["config"] = json.loads(r["configurations"])
+ self.add_host_type(host, "Network", '')
+
+ # fetch ip_address from nova.compute_nodes table if possible
+ def fetch_compute_node_ip_address(self, doc, h):
+ query = """
+ SELECT host_ip AS ip_address
+ FROM nova.compute_nodes
+ WHERE hypervisor_hostname = %s
+ """
+ results = self.get_objects_list_for_id(query, "", h)
+ for db_row in results:
+ doc.update(db_row)
+
+    def add_host_type(self, doc, host_type, zone):
+        if host_type not in doc["host_type"]:
+            doc["host_type"].append(host_type)
+        if host_type == 'Compute':
+            doc['zone'] = zone
+            doc['parent_id'] = zone
diff --git a/app/discover/fetchers/api/api_fetch_projects.py b/app/discover/fetchers/api/api_fetch_projects.py
new file mode 100644
index 0000000..4ef8083
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_projects.py
@@ -0,0 +1,66 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+
+
+class ApiFetchProjects(ApiAccess):
+ def __init__(self):
+ super(ApiFetchProjects, self).__init__()
+
+ def get(self, project_id):
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ if not self.regions:
+ self.log.error('No regions found')
+ return []
+ ret = []
+ for region in self.regions:
+ ret.extend(self.get_for_region(region, token))
+ projects_for_user = self.get_projects_for_api_user(region, token)
+ return [p for p in ret if p['name'] in projects_for_user] \
+ if projects_for_user else ret
+
+ def get_projects_for_api_user(self, region, token):
+ if not token:
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ endpoint = self.get_region_url_nover(region, "keystone")
+ headers = {
+ 'X-Auth-Project-Id': self.admin_project,
+ 'X-Auth-Token': token['id']
+ }
+ # get the list of projects accessible by the admin user
+ req_url = endpoint + '/v3/projects'
+ response = self.get_url(req_url, headers)
+ if not response or 'projects' not in response:
+ return None
+ response = [p['name'] for p in response['projects']]
+ return response
+
+ def get_for_region(self, region, token):
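+        # list tenants via the keystone v2.0 API,
+        # excluding the internal "services" tenant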
+ endpoint = self.get_region_url_nover(region, "keystone")
+ req_url = endpoint + "/v2.0/tenants"
+ headers = {
+ "X-Auth-Project-Id": self.admin_project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if not isinstance(response, dict):
+ self.log.error('invalid response to /tenants request: not dict')
+ return []
+ tenants_list = response.get("tenants", [])
+ if not isinstance(tenants_list, list):
+            self.log.error('invalid response to /tenants request: '
+                           'tenants value is not a list')
+ return []
+ response = [t for t in tenants_list if t.get("name", "") != "services"]
+ return response
diff --git a/app/discover/fetchers/api/api_fetch_regions.py b/app/discover/fetchers/api/api_fetch_regions.py
new file mode 100644
index 0000000..dcc558f
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_regions.py
@@ -0,0 +1,51 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+
+
+class ApiFetchRegions(ApiAccess):
+ def __init__(self):
+ super(ApiFetchRegions, self).__init__()
+ self.endpoint = ApiAccess.base_url
+
+ def get(self, project_id):
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ # the returned authentication response contains the list of end points
+ # and regions
+ service_catalog = ApiAccess.auth_response.get('access', {}).get('serviceCatalog')
+ if not service_catalog:
+ return []
+ env = self.get_env()
+ ret = []
+ NULL_REGION = "No-Region"
+ for service in service_catalog:
+ for e in service["endpoints"]:
+ if "region" in e:
+ region_name = e.pop("region")
+ region_name = region_name if region_name else NULL_REGION
+ else:
+ region_name = NULL_REGION
+                if region_name in self.regions:
+ region = self.regions[region_name]
+ else:
+ region = {
+ "id": region_name,
+ "name": region_name,
+ "endpoints": {}
+ }
+ ApiAccess.regions[region_name] = region
+ region["parent_type"] = "regions_folder"
+ region["parent_id"] = env + "-regions"
+ e["service_type"] = service["type"]
+ region["endpoints"][service["name"]] = e
+ ret.extend(list(ApiAccess.regions.values()))
+ return ret
diff --git a/app/discover/fetchers/cli/__init__.py b/app/discover/fetchers/cli/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/cli/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/cli/cli_access.py b/app/discover/fetchers/cli/cli_access.py
new file mode 100644
index 0000000..1db84ea
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_access.py
@@ -0,0 +1,206 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+import time
+
+from discover.fetcher import Fetcher
+from utils.binary_converter import BinaryConverter
+from utils.logging.console_logger import ConsoleLogger
+from utils.ssh_conn import SshConn
+
+
+class CliAccess(BinaryConverter, Fetcher):
+ connections = {}
+ ssh_cmd = "ssh -o StrictHostKeyChecking=no "
+ call_count_per_con = {}
+ max_call_count_per_con = 100
+ cache_lifetime = 60 # no. of seconds to cache results
+ cached_commands = {}
+
+ def __init__(self):
+ super().__init__()
+ self.log = ConsoleLogger()
+
+ @staticmethod
+ def is_gateway_host(ssh_to_host):
+ ssh_conn = SshConn(ssh_to_host)
+ return ssh_conn.is_gateway_host(ssh_to_host)
+
+ def run_on_gateway(self, cmd, ssh_to_host="", enable_cache=True,
+ use_sudo=True):
+ self.run(cmd, ssh_to_host=ssh_to_host, enable_cache=enable_cache,
+ on_gateway=True, use_sudo=use_sudo)
+
+ def run(self, cmd, ssh_to_host="", enable_cache=True, on_gateway=False,
+ ssh=None, use_sudo=True):
+ ssh_conn = ssh if ssh else SshConn(ssh_to_host)
+ if use_sudo and not cmd.strip().startswith("sudo "):
+ cmd = "sudo " + cmd
+ if not on_gateway and ssh_to_host \
+ and not ssh_conn.is_gateway_host(ssh_to_host):
+ cmd = self.ssh_cmd + ssh_to_host + " " + cmd
+ curr_time = time.time()
+ cmd_path = ssh_to_host + ',' + cmd
+ if enable_cache and cmd_path in self.cached_commands:
+ # try to re-use output from last call
+ cached = self.cached_commands[cmd_path]
+ if cached["timestamp"] + self.cache_lifetime < curr_time:
+ # result expired
+ self.cached_commands.pop(cmd_path, None)
+ else:
+ # result is good to use - skip the SSH call
+ self.log.info('CliAccess: ****** using cached result, ' +
+ 'host: ' + ssh_to_host + ', cmd: %s ******', cmd)
+ return cached["result"]
+
+ self.log.info('CliAccess: host: %s, cmd: %s', ssh_to_host, cmd)
+ ret = ssh_conn.exec(cmd)
+ self.cached_commands[cmd_path] = {"timestamp": curr_time, "result": ret}
+ return ret
+
+ def run_fetch_lines(self, cmd, ssh_to_host="", enable_cache=True):
+ out = self.run(cmd, ssh_to_host, enable_cache)
+ if not out:
+ return []
+        # first try to split the output into lines by newline characters
+        ret = out.splitlines()
+        # if that yields a single line, try splitting by the literal "\n" sequence
+ if len(ret) == 1:
+ ret = [l for l in out.split("\\n") if l != ""]
+ return ret
+
+ # parse command output columns separated by whitespace
+ # since headers can contain whitespace themselves,
+ # it is the caller's responsibility to provide the headers
+ def parse_cmd_result_with_whitespace(self, lines, headers, remove_first):
+ if remove_first:
+ # remove headers line
+ del lines[:1]
+ results = [self.parse_line_with_ws(line, headers)
+ for line in lines]
+ return results
+
+ # parse command output with "|" column separators and "-" row separators
+ def parse_cmd_result_with_separators(self, lines):
+ headers = self.parse_headers_line_with_separators(lines[1])
+ # remove line with headers and formatting lines above it and below it
+ del lines[:3]
+ # remove formatting line in the end
+ lines.pop()
+ results = [self.parse_content_line_with_separators(line, headers)
+ for line in lines]
+ return results
+
+ # parse a line with columns separated by whitespace
+ def parse_line_with_ws(self, line, headers):
+ s = line if isinstance(line, str) else self.binary2str(line)
+ parts = [word.strip() for word in s.split() if word.strip()]
+ ret = {}
+ for i, p in enumerate(parts):
+ header = headers[i]
+ ret[header] = p
+ return ret
+
+ # parse a line with "|" column separators
+ def parse_line_with_separators(self, line):
+ s = self.binary2str(line)
+ parts = [word.strip() for word in s.split("|") if word.strip()]
+ # remove the ID field
+ del parts[:1]
+ return parts
+
+ def parse_headers_line_with_separators(self, line):
+ return self.parse_line_with_separators(line)
+
+ def parse_content_line_with_separators(self, line, headers):
+ content_parts = self.parse_line_with_separators(line)
+ content = {}
+ for i in range(0, len(content_parts)):
+ content[headers[i]] = content_parts[i]
+ return content
+
+ def merge_ws_spillover_lines(self, lines):
+ # with WS-separated output, extra output sometimes spills to next line
+        # detect that and add to the end of the previous line for our processing
+        pending_line = None
+        fixed_lines = []
+ for l in lines:
+ if l[0] == '\t':
+ # this is a spill-over line
+ if pending_line:
+ # add this line to the end of the previous line
+ pending_line = pending_line.strip() + "," + l.strip()
+ else:
+ # add the previous pending line to the fixed lines list
+ if pending_line:
+ fixed_lines.append(pending_line)
+ # make current line the pending line
+ pending_line = l
+ if pending_line:
+ fixed_lines.append(pending_line)
+ return fixed_lines
+
+ """
+ given output lines from CLI command like 'ip -d link show',
+ find lines belonging to section describing a specific interface
+ parameters:
+ - lines: list of strings, output of command
+ - header_regexp: regexp marking the start of the section
+ - end_regexp: regexp marking the end of the section
+ """
+ def get_section_lines(self, lines, header_regexp, end_regexp):
+ if not lines:
+ return []
+ header_re = re.compile(header_regexp)
+ start_pos = None
+ # find start_pos of section
+ line_count = len(lines)
+        for line_num in range(line_count):
+ matches = header_re.match(lines[line_num])
+ if matches:
+ start_pos = line_num
+ break
+        if start_pos is None:
+ return []
+ # find end of section
+ end_pos = line_count
+ end_re = re.compile(end_regexp)
+        for line_num in range(start_pos + 1, end_pos):
+ matches = end_re.match(lines[line_num])
+ if matches:
+ end_pos = line_num
+ break
+ return lines[start_pos:end_pos]
+
+ def get_object_data(self, o, lines, regexps):
+ """
+ find object data in output lines from CLI command
+ parameters:
+ - o: object (dict), to which we'll add attributes with the data found
+ - lines: list of strings
+        - regexps: list of dicts, each with keys 'name' (attribute name) and
+                   're' (regexp for finding the attribute value), and an
+                   optional 'default' value used when no match is found
+ """
+ for line in lines:
+ self.find_matching_regexps(o, line, regexps)
+ for regexp_tuple in regexps:
+ name = regexp_tuple['name']
+            if name not in o and 'default' in regexp_tuple:
+ o[name] = regexp_tuple['default']
+
+ def find_matching_regexps(self, o, line, regexps):
+ for regexp_tuple in regexps:
+ name = regexp_tuple['name']
+ regex = regexp_tuple['re']
+ regex = re.compile(regex)
+ matches = regex.search(line)
+ if matches:
+ o[name] = matches.group(1)
diff --git a/app/discover/fetchers/cli/cli_fetch_host_pnics.py b/app/discover/fetchers/cli/cli_fetch_host_pnics.py
new file mode 100644
index 0000000..3516e25
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_host_pnics.py
@@ -0,0 +1,122 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.cli.cli_access import CliAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class CliFetchHostPnics(CliAccess):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+        self.ethtool_attr = re.compile(r'^\s+([^:]+):\s(.*)$')
+        self.regexps = [
+            {'name': 'mac_address', 're': r'^.*\sHWaddr\s(\S+)(\s.*)?$'},
+            {'name': 'mac_address', 're': r'^.*\sether\s(\S+)(\s.*)?$'},
+            {'name': 'IP Address', 're': r'^\s*inet addr:?(\S+)\s.*$'},
+            {'name': 'IP Address', 're': r'^\s*inet ([0-9.]+)\s.*$'},
+            {'name': 'IPv6 Address', 're': r'^\s*inet6 addr:\s*(\S+)(\s.*)?$'},
+            {'name': 'IPv6 Address', 're': r'^\s*inet6 \s*(\S+)(\s.*)?$'}
+        ]
+
+ def get(self, id):
+ host_id = id[:id.rindex("-")]
+ cmd = 'ls -l /sys/class/net | grep ^l | grep -v "/virtual/"'
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ if not host:
+ self.log.error("CliFetchHostPnics: host not found: " + host_id)
+ return []
+ if "host_type" not in host:
+ self.log.error("host does not have host_type: " + host_id +
+ ", host: " + str(host))
+ return []
+ host_types = host["host_type"]
+ if "Network" not in host_types and "Compute" not in host_types:
+ return []
+ interface_lines = self.run_fetch_lines(cmd, host_id)
+ interfaces = []
+ for line in interface_lines:
+ interface_name = line[line.rindex('/')+1:]
+ interface_name = interface_name.strip()
+ # run ifconfig with specific interface name,
+ # since running it with no name yields a list without inactive pNICs
+ interface = self.find_interface_details(host_id, interface_name)
+ if interface:
+ interfaces.append(interface)
+ return interfaces
+
+ def find_interface_details(self, host_id, interface_name):
+ lines = self.run_fetch_lines("ifconfig " + interface_name, host_id)
+ interface = None
+ status_up = None
+ for line in [l for l in lines if l != '']:
+ tokens = None
+ if interface is None:
+ tokens = line.split()
+ name = tokens[0].strip('- :')
+ name = name.strip()
+ if name == interface_name:
+ line_remainder = line.strip('-')[len(interface_name)+2:]
+ line_remainder = line_remainder.strip(' :')
+ id = interface_name
+ interface = {
+ "host": host_id,
+ "name": id,
+ "local_name": interface_name,
+ "lines": []
+ }
+ self.handle_line(interface, line_remainder)
+ if '<UP,' in line:
+ status_up = True
+ if status_up is None:
+ if tokens is None:
+ tokens = line.split()
+ if 'BROADCAST' in tokens:
+ status_up = 'UP' in tokens
+ if interface:
+ self.handle_line(interface, line)
+        self.set_interface_data(interface)
+        if not interface:
+            return None
+        interface['state'] = 'UP' if status_up else 'DOWN'
+ if 'id' not in interface:
+ interface['id'] = interface_name + '-unknown_mac'
+ return interface
+
+ def handle_line(self, interface, line):
+ self.find_matching_regexps(interface, line, self.regexps)
+ if 'mac_address' in interface:
+ interface["id"] = interface["name"] + "-" + interface["mac_address"]
+ interface["lines"].append(line.strip())
+
+ def set_interface_data(self, interface):
+ if not interface:
+ return
+ interface["data"] = "\n".join(interface["lines"])
+ interface.pop("lines", None)
+ ethtool_ifname = interface["local_name"]
+ if "@" in interface["local_name"]:
+ pos = interface["local_name"].index("@")
+ ethtool_ifname = ethtool_ifname[pos + 1:]
+ cmd = "ethtool " + ethtool_ifname
+ lines = self.run_fetch_lines(cmd, interface["host"])
+ attr = None
+ for line in lines[1:]:
+ matches = self.ethtool_attr.match(line)
+ if matches:
+ # add this attribute to the interface
+ attr = matches.group(1)
+ value = matches.group(2)
+ interface[attr] = value.strip()
+ else:
+ # add more values to the current attribute as an array
+ if isinstance(interface[attr], str):
+ interface[attr] = [interface[attr], line.strip()]
+ else:
+ interface[attr].append(line.strip())
diff --git a/app/discover/fetchers/cli/cli_fetch_host_pnics_vpp.py b/app/discover/fetchers/cli/cli_fetch_host_pnics_vpp.py
new file mode 100644
index 0000000..69b6413
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_host_pnics_vpp.py
@@ -0,0 +1,44 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetcher import Fetcher
+from utils.inventory_mgr import InventoryMgr
+
+NAME_RE = '^[a-zA-Z]*GigabitEthernet'
+
+class CliFetchHostPnicsVpp(Fetcher):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.name_re = re.compile(NAME_RE)
+
+ def get(self, id):
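+        # the requested id has the form "<host id>-<x>-<y>";
+        # strip the two trailing segments to recover the host id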
+ host_id = id[:id.rindex("-")]
+ host_id = id[:host_id.rindex("-")]
+ vedges = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "vedge",
+ "host": host_id
+ })
+ ret = []
+ for vedge in vedges:
+ pnic_ports = vedge['ports']
+ for pnic_name in pnic_ports:
+ if not self.name_re.search(pnic_name):
+ continue
+ pnic = pnic_ports[pnic_name]
+ pnic['host'] = host_id
+ pnic['id'] = host_id + "-pnic-" + pnic_name
+ pnic['type'] = 'pnic'
+ pnic['object_name'] = pnic_name
+ pnic['Link detected'] = 'yes' if pnic['state'] == 'up' else 'no'
+ ret.append(pnic)
+ return ret
diff --git a/app/discover/fetchers/cli/cli_fetch_host_vservice.py b/app/discover/fetchers/cli/cli_fetch_host_vservice.py
new file mode 100644
index 0000000..9f8173f
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_host_vservice.py
@@ -0,0 +1,80 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.cli.cli_access import CliAccess
+from discover.fetchers.db.db_access import DbAccess
+from discover.network_agents_list import NetworkAgentsList
+from utils.inventory_mgr import InventoryMgr
+
+
+class CliFetchHostVservice(CliAccess, DbAccess):
+ def __init__(self):
+ super(CliFetchHostVservice, self).__init__()
+ # match only DHCP agent and router (L3 agent)
+ self.type_re = re.compile("^q(dhcp|router)-")
+ self.inv = InventoryMgr()
+ self.agents_list = NetworkAgentsList()
+
+ def get_vservice(self, host_id, name_space):
+ result = {"local_service_id": name_space}
+ self.set_details(host_id, result)
+ return result
+
+ def set_details(self, host_id, r):
+        # the local service id has the form "q<type>-<id>", e.g. "qdhcp-<network id>";
+        # split it into the service type prefix and the bare neutron object id
+ id_full = r["local_service_id"].strip()
+ prefix = id_full[1:id_full.index('-')]
+ id_clean = id_full[id_full.index('-') + 1:]
+ r["service_type"] = prefix
+ name = self.get_router_name(r, id_clean) if prefix == "router" \
+ else self.get_network_name(id_clean)
+ r["name"] = prefix + "-" + name
+ r["host"] = host_id
+ r["id"] = host_id + "-" + id_full
+ self.set_agent_type(r)
+
+ def get_network_name(self, id):
+ query = """
+ SELECT name
+ FROM {}.networks
+ WHERE id = %s
+ """.format(self.neutron_db)
+ results = self.get_objects_list_for_id(query, "router", id)
+ if not list(results):
+ return id
+ for db_row in results:
+ return db_row["name"]
+
+ def get_router_name(self, r, id):
+ query = """
+ SELECT *
+ FROM {}.routers
+ WHERE id = %s
+ """.format(self.neutron_db)
+ results = self.get_objects_list_for_id(query, "router", id.strip())
+ for db_row in results:
+ r.update(db_row)
+ return r["name"]
+
+ # dynamically create sub-folder for vService by type
+ def set_agent_type(self, o):
+ o["master_parent_id"] = o["host"] + "-vservices"
+ o["master_parent_type"] = "vservices_folder"
+ atype = o["service_type"]
+ agent = self.agents_list.get_type(atype)
+ try:
+ o["parent_id"] = o["master_parent_id"] + "-" + agent["type"] + "s"
+ o["parent_type"] = "vservice_" + agent["type"] + "s_folder"
+ o["parent_text"] = agent["folder_text"]
+ except KeyError:
+ o["parent_id"] = o["master_parent_id"] + "-" + "miscellenaous"
+ o["parent_type"] = "vservice_miscellenaous_folder"
+ o["parent_text"] = "Misc. services"
diff --git a/app/discover/fetchers/cli/cli_fetch_host_vservices.py b/app/discover/fetchers/cli/cli_fetch_host_vservices.py
new file mode 100644
index 0000000..9b62dcb
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_host_vservices.py
@@ -0,0 +1,27 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_host_vservice import CliFetchHostVservice
+
+
+class CliFetchHostVservices(CliFetchHostVservice):
+ def __init__(self):
+ super(CliFetchHostVservices, self).__init__()
+
+ def get(self, host_id):
+ host = self.inv.get_single(self.get_env(), "host", host_id)
+ if "Network" not in host["host_type"]:
+ return []
+ services_ids = [l[:l.index(' ')] if ' ' in l else l
+ for l in self.run_fetch_lines("ip netns", host_id)]
+ results = [{"local_service_id": s} for s in services_ids if self.type_re.match(s)]
+ for r in results:
+ self.set_details(host_id, r)
+ return results
+
diff --git a/app/discover/fetchers/cli/cli_fetch_instance_vnics.py b/app/discover/fetchers/cli/cli_fetch_instance_vnics.py
new file mode 100644
index 0000000..22ac573
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_instance_vnics.py
@@ -0,0 +1,22 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_instance_vnics_base import CliFetchInstanceVnicsBase
+
+
+class CliFetchInstanceVnics(CliFetchInstanceVnicsBase):
+ def __init__(self):
+ super().__init__()
+
+ def set_vnic_properties(self, v, instance):
+ super().set_vnic_properties(v, instance)
+ v["source_bridge"] = v["source"]["@bridge"]
+
+ def get_vnic_name(self, v, instance):
+ return v["target"]["@dev"]
diff --git a/app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py b/app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py
new file mode 100644
index 0000000..4de1840
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py
@@ -0,0 +1,68 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import xmltodict
+
+from discover.fetchers.cli.cli_access import CliAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class CliFetchInstanceVnicsBase(CliAccess):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, id):
+ instance_uuid = id[:id.rindex('-')]
+ instance = self.inv.get_by_id(self.get_env(), instance_uuid)
+ if not instance:
+ return []
+ host = self.inv.get_by_id(self.get_env(), instance["host"])
+ if not host or "Compute" not in host["host_type"]:
+ return []
+ lines = self.run_fetch_lines("virsh list", instance["host"])
+ del lines[:2] # remove header
+        virsh_ids = [l.split()[0] for l in lines if l.strip()]
+ results = []
+        # Note: 'virsh list' reports instances by local names/ids that are
+        # not connected to the data we have for the instance so far.
+        # We therefore decide whether an instance is the correct one
+        # by comparing its UUID with the UUID in the 'virsh dumpxml' output.
+ for id in virsh_ids:
+ results.extend(self.get_vnics_from_dumpxml(id, instance))
+ return results
+
+ def get_vnics_from_dumpxml(self, id, instance):
+ xml_string = self.run("virsh dumpxml " + id, instance["host"])
+ if not xml_string.strip():
+ return []
+ response = xmltodict.parse(xml_string)
+ if instance["uuid"] != response["domain"]["uuid"]:
+ # this is the wrong instance - skip it
+ return []
+ try:
+ vnics = response["domain"]["devices"]["interface"]
+ except KeyError:
+ return []
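+        # xmltodict returns a single dict when the domain has one interface
+        # and a list when it has several - normalize to a list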
+ if isinstance(vnics, dict):
+ vnics = [vnics]
+ for v in vnics:
+ self.set_vnic_properties(v, instance)
+ return vnics
+
+ def set_vnic_properties(self, v, instance):
+ v["name"] = self.get_vnic_name(v, instance)
+ v["id"] = v["name"]
+ v["vnic_type"] = "instance_vnic"
+ v["host"] = instance["host"]
+ v["instance_id"] = instance["id"]
+ v["instance_db_id"] = instance["_id"]
+ v["mac_address"] = v["mac"]["@address"]
+ instance["mac_address"] = v["mac_address"]
+ self.inv.set(instance)
diff --git a/app/discover/fetchers/cli/cli_fetch_instance_vnics_vpp.py b/app/discover/fetchers/cli/cli_fetch_instance_vnics_vpp.py
new file mode 100644
index 0000000..58facd2
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_instance_vnics_vpp.py
@@ -0,0 +1,18 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_instance_vnics_base import CliFetchInstanceVnicsBase
+
+
+class CliFetchInstanceVnicsVpp(CliFetchInstanceVnicsBase):
+ def __init__(self):
+ super().__init__()
+
+ def get_vnic_name(self, v, instance):
+ return instance["name"] + "-" + v["@type"] + "-" + v["mac"]["@address"]
diff --git a/app/discover/fetchers/cli/cli_fetch_oteps_lxb.py b/app/discover/fetchers/cli/cli_fetch_oteps_lxb.py
new file mode 100644
index 0000000..1e65a14
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_oteps_lxb.py
@@ -0,0 +1,86 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_access import CliAccess
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class CliFetchOtepsLxb(CliAccess, DbAccess):
+
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, parent_id):
+ vconnector = self.inv.get_by_id(self.get_env(), parent_id)
+ if not vconnector:
+ return []
+ configurations = vconnector['configurations']
+ tunneling_ip = configurations['tunneling_ip']
+ tunnel_types_used = configurations['tunnel_types']
+ if not tunnel_types_used:
+ return []
+ tunnel_type = tunnel_types_used[0]
+ if not tunnel_type:
+ return []
+ # check only interfaces with name matching tunnel type
+ ret = [i for i in vconnector['interfaces'].values()
+ if i['name'].startswith(tunnel_type + '-')]
+ for otep in ret:
+ otep['ip_address'] = tunneling_ip
+ otep['host'] = vconnector['host']
+ self.get_otep_ports(otep)
+ otep['id'] = otep['host'] + '-otep-' + otep['name']
+ otep['name'] = otep['id']
+ otep['vconnector'] = vconnector['name']
+ otep['overlay_type'] = tunnel_type
+ self.get_udp_port(otep)
+ return ret
+
+    def get_otep_ports(self, otep):
+        """Fetch OTEP data from the CLI command 'ip -d link show'."""
+ cmd = 'ip -d link show'
+ lines = self.run_fetch_lines(cmd, otep['host'])
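+        # each interface section in 'ip -d link show' output starts with a
+        # header like '<index>: <name>: ...', with its details indented below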
+ header_format = '[0-9]+: ' + otep['name'] + ':'
+ interface_lines = self.get_section_lines(lines, header_format, '\S')
+ otep['data'] = '\n'.join(interface_lines)
+ regexps = [
+ {'name': 'state', 're': ',UP,', 'default': 'DOWN'},
+ {'name': 'mac_address', 're': '.*\slink/ether\s(\S+)\s'},
+ {'name': 'mtu', 're': '.*\smtu\s(\S+)\s'},
+ ]
+ self.get_object_data(otep, interface_lines, regexps)
+ cmd = 'bridge fdb show'
+ dst_line_format = ' dev ' + otep['name'] + ' dst '
+ lines = self.run_fetch_lines(cmd, otep['host'])
+ lines = [l for l in lines if dst_line_format in l]
+ if lines:
+ l = lines[0]
+ otep['bridge dst'] = l[l.index(' dst ')+5:]
+ return otep
+
+ def get_udp_port(self, otep):
+ table_name = "neutron.ml2_" + otep['overlay_type'] + "_endpoints"
+        otep['udp_port'] = 0
+ try:
+ results = self.get_objects_list_for_id(
+ """
+ SELECT udp_port
+ FROM {}
+ WHERE host = %s
+ """.format(table_name),
+ "vedge", otep['host'])
+ except Exception as e:
+ self.log.error('failed to fetch UDP port for OTEP: ' + str(e))
+            return
+ for result in results:
+ otep['udp_port'] = result['udp_port']
diff --git a/app/discover/fetchers/cli/cli_fetch_vconnectors.py b/app/discover/fetchers/cli/cli_fetch_vconnectors.py
new file mode 100644
index 0000000..78b767a
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vconnectors.py
@@ -0,0 +1,40 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from abc import abstractmethod, ABCMeta
+
+from discover.fetchers.cli.cli_access import CliAccess
+from utils.inventory_mgr import InventoryMgr
+from utils.singleton import Singleton
+
+
+class ABCSingleton(ABCMeta, Singleton):
+ pass
+
+
+class CliFetchVconnectors(CliAccess, metaclass=ABCSingleton):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ @abstractmethod
+ def get_vconnectors(self, host):
+ raise NotImplementedError("Subclass must override get_vconnectors()")
+
+ def get(self, id):
+ host_id = id[:id.rindex('-')]
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ if not host:
+ self.log.error("CliFetchVconnectors: host not found: " + host_id)
+ return []
+ if "host_type" not in host:
+            self.log.error("host does not have host_type: " + host_id +
+ ", host: " + str(host))
+ return []
+ return self.get_vconnectors(host)
diff --git a/app/discover/fetchers/cli/cli_fetch_vconnectors_lxb.py b/app/discover/fetchers/cli/cli_fetch_vconnectors_lxb.py
new file mode 100644
index 0000000..648dc63
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vconnectors_lxb.py
@@ -0,0 +1,35 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from discover.fetchers.cli.cli_fetch_vconnectors_ovs import CliFetchVconnectorsOvs
+from discover.fetchers.db.db_access import DbAccess
+
+
+class CliFetchVconnectorsLxb(CliFetchVconnectorsOvs, DbAccess):
+
+ def __init__(self):
+ super().__init__()
+
+ def get(self, id):
+ ret = super().get(id)
+ for doc in ret:
+ query = """
+ SELECT configurations
+ FROM {}.agents
+ WHERE agent_type="Linux bridge agent" AND host = %s
+ """.format(self.neutron_db)
+ host = doc['host']
+ matches = self.get_objects_list_for_id(query, '', host)
+ if not matches:
+ raise ValueError('No Linux bridge agent in DB for host: {}'.format(host))
+ agent = matches[0]
+ doc['configurations'] = json.loads(agent['configurations'])
+ return ret
diff --git a/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py b/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py
new file mode 100644
index 0000000..ff37569
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py
@@ -0,0 +1,56 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.cli.cli_fetch_vconnectors import CliFetchVconnectors
+
+
+class CliFetchVconnectorsOvs(CliFetchVconnectors):
+ def __init__(self):
+ super().__init__()
+
+ def get_vconnectors(self, host):
+ host_id = host['id']
+ lines = self.run_fetch_lines("brctl show", host_id)
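+        # 'brctl show' columns (the header line itself is removed below):
+        # bridge name     bridge id               STP enabled     interfaces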
+ headers = ["bridge_name", "bridge_id", "stp_enabled", "interfaces"]
+ headers_count = len(headers)
+ # since we hard-coded the headers list, remove the headers line
+ del lines[:1]
+
+        # interfaces can spill over to the next line - need to detect that
+        # and add them to the end of the previous line for our processing
+ fixed_lines = self.merge_ws_spillover_lines(lines)
+
+ results = self.parse_cmd_result_with_whitespace(fixed_lines, headers, False)
+ ret = []
+ for doc in results:
+ doc["name"] = doc.pop("bridge_name")
+ doc["id"] = doc["name"] + "-" + doc.pop("bridge_id")
+ doc["host"] = host_id
+ doc["connector_type"] = "bridge"
+ if "interfaces" in doc:
+ interfaces = {}
+ interface_names = doc["interfaces"].split(",")
+ for interface_name in interface_names:
+ # find MAC address for this interface from ports list
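+                    # interface names are '<3-char prefix><start of port id>'
+                    # (e.g. 'tap1234abcd'), so match the part after the prefix
+                    # against port ids in the inventory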
+ port_id_prefix = interface_name[3:]
+ port = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "port",
+ "binding:host_id": host_id,
+ "id": {"$regex": r"^" + re.escape(port_id_prefix)}
+ }, get_single=True)
+ mac_address = '' if not port else port['mac_address']
+ interface = {'name': interface_name, 'mac_address': mac_address}
+ interfaces[interface_name] = interface
+ doc["interfaces"] = interfaces
+ doc['interfaces_names'] = list(interfaces.keys())
+ ret.append(doc)
+ return ret
diff --git a/app/discover/fetchers/cli/cli_fetch_vconnectors_vpp.py b/app/discover/fetchers/cli/cli_fetch_vconnectors_vpp.py
new file mode 100644
index 0000000..479e1db
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vconnectors_vpp.py
@@ -0,0 +1,64 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_vconnectors import CliFetchVconnectors
+
+
+class CliFetchVconnectorsVpp(CliFetchVconnectors):
+ def __init__(self):
+ super().__init__()
+
+ def get_vconnectors(self, host):
+ lines = self.run_fetch_lines("vppctl show mode", host['id'])
+ vconnectors = {}
+ for l in lines:
+ if not l.startswith('l2 bridge'):
+ continue
+ line_parts = l.split(' ')
+ name = line_parts[2]
+ bd_id = line_parts[4]
+ if bd_id in vconnectors:
+ vconnector = vconnectors[bd_id]
+ else:
+ vconnector = {
+ 'host': host['id'],
+ 'id': host['id'] + '-vconnector-' + bd_id,
+ 'bd_id': bd_id,
+ 'name': "bridge-domain-" + bd_id,
+ 'interfaces': {},
+ 'interfaces_names': []
+ }
+ vconnectors[bd_id] = vconnector
+ interface = self.get_interface_details(host, name)
+ if interface:
+ vconnector['interfaces'][name] = interface
+ vconnector['interfaces_names'].append(name)
+ return list(vconnectors.values())
+
+ def get_interface_details(self, host, name):
+ # find vconnector interfaces
+ cmd = "vppctl show hardware-int " + name
+ interface_lines = self.run_fetch_lines(cmd, host['id'])
+ # remove header line
+ interface_lines.pop(0)
+ interface = None
+ for l in interface_lines:
+ if not l.strip():
+ continue # ignore empty lines
+ if not l.startswith(' '):
+ details = l.split()
+ interface = {
+ "name": details[0],
+ "hardware": details[3],
+ "state": details[2],
+ "id": details[1],
+ }
+ elif l.startswith(' Ethernet address '):
+ interface['mac_address'] = l[l.rindex(' ') + 1:]
+ return interface
diff --git a/app/discover/fetchers/cli/cli_fetch_vpp_vedges.py b/app/discover/fetchers/cli/cli_fetch_vpp_vedges.py
new file mode 100644
index 0000000..f9c622d
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vpp_vedges.py
@@ -0,0 +1,58 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# Copyright 2016 Cisco Corporation
+# oslo-related message handling
+
+from oslo_serialization import jsonutils
+from oslo_utils import uuidutils
+import yaml
+
+from neutronclient.tests.functional import base
+
+
+class TestCLIFormatter(base.ClientTestBase):
+
+    # NOTE: legacy content, not related to VPP - disregard
+ def setUp(self):
+ super(TestCLIFormatter, self).setUp()
+ self.net_name = 'net-%s' % uuidutils.generate_uuid()
+ self.addCleanup(self.neutron, 'net-delete %s' % self.net_name)
+
+ def _create_net(self, fmt, col_attrs):
+ params = ['-c %s' % attr for attr in col_attrs]
+ params.append('-f %s' % fmt)
+ params.append(self.net_name)
+ param_string = ' '.join(params)
+ return self.neutron('net-create', params=param_string)
+
+ def test_net_create_with_json_formatter(self):
+ result = self._create_net('json', ['name', 'admin_state_up'])
+ self.assertDictEqual({'name': self.net_name,
+ 'admin_state_up': True},
+ jsonutils.loads(result))
+
+ def test_net_create_with_yaml_formatter(self):
+ result = self._create_net('yaml', ['name', 'admin_state_up'])
+ self.assertDictEqual({'name': self.net_name,
+ 'admin_state_up': True},
+ yaml.load(result))
+
+ def test_net_create_with_value_formatter(self):
+ # NOTE(amotoki): In 'value' formatter, there is no guarantee
+ # in the order of attribute, so we use one attribute in this test.
+ result = self._create_net('value', ['name'])
+ self.assertEqual(self.net_name, result.strip())
+
+ def test_net_create_with_shell_formatter(self):
+ result = self._create_net('shell', ['name', 'admin_state_up'])
+ result_lines = set(result.strip().split('\n'))
+ self.assertSetEqual(set(['name="%s"' % self.net_name,
+ 'admin_state_up="True"']),
+                            result_lines)
diff --git a/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py b/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
new file mode 100644
index 0000000..44ac8d6
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
@@ -0,0 +1,140 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.cli.cli_access import CliAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class CliFetchVserviceVnics(CliAccess):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
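+        # matches the header line of each interface block in 'ifconfig'
+        # output: interface name at line start, rest of the line captured;
+        # indented detail lines do not match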
+ self.if_header = re.compile('^[-]?(\S+)\s+(.*)$')
+ self.regexps = [
+ {'name': 'mac_address', 're': '^.*\sHWaddr\s(\S+)(\s.*)?$'},
+ {'name': 'mac_address', 're': '^.*\sether\s(\S+)(\s.*)?$'},
+ {'name': 'netmask', 're': '^.*\sMask:\s?([0-9.]+)(\s.*)?$'},
+ {'name': 'netmask', 're': '^.*\snetmask\s([0-9.]+)(\s.*)?$'},
+ {'name': 'IP Address', 're': '^\s*inet addr:(\S+)\s.*$'},
+ {'name': 'IP Address', 're': '^\s*inet ([0-9.]+)\s.*$'},
+ {'name': 'IPv6 Address',
+ 're': '^\s*inet6 addr: ?\s*([0-9a-f:/]+)(\s.*)?$'},
+ {'name': 'IPv6 Address',
+ 're': '^\s*inet6 \s*([0-9a-f:/]+)(\s.*)?$'}
+ ]
+
+ def get(self, host_id):
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ if not host:
+ self.log.error("host not found: " + host_id)
+ return []
+ if "host_type" not in host:
+ self.log.error("host does not have host_type: " + host_id +
+ ", host: " + str(host))
+ return []
+ if "Network" not in host["host_type"]:
+ return []
+ lines = self.run_fetch_lines("ip netns", host_id)
+ ret = []
+ for l in [l for l in lines
+ if l.startswith("qdhcp") or l.startswith("qrouter")]:
+ service = l.strip()
+ service = service if ' ' not in service \
+ else service[:service.index(' ')]
+ ret.extend(self.handle_service(host_id, service))
+ return ret
+
+ def handle_service(self, host, service, enable_cache=True):
+ cmd = "ip netns exec " + service + " ifconfig"
+ lines = self.run_fetch_lines(cmd, host, enable_cache)
+ interfaces = []
+ current = None
+ for line in lines:
+ matches = self.if_header.match(line)
+ if matches:
+ if current:
+ self.set_interface_data(current)
+ name = matches.group(1).strip(":")
+ # ignore 'lo' interface
+ if name == 'lo':
+ current = None
+ else:
+ line_remainder = matches.group(2)
+ vservice_id = host + "-" + service
+ current = {
+ "id": host + "-" + name,
+ "type": "vnic",
+ "vnic_type": "vservice_vnic",
+ "host": host,
+ "name": name,
+ "master_parent_type": "vservice",
+ "master_parent_id": vservice_id,
+ "parent_type": "vnics_folder",
+ "parent_id": vservice_id + "-vnics",
+ "parent_text": "vNICs",
+ "lines": []
+ }
+ interfaces.append(current)
+ self.handle_line(current, line_remainder)
+ else:
+ if current:
+ self.handle_line(current, line)
+ if current:
+ self.set_interface_data(current)
+ return interfaces
+
+ def handle_line(self, interface, line):
+ self.find_matching_regexps(interface, line, self.regexps)
+ interface["lines"].append(line.strip())
+
+ def set_interface_data(self, interface):
+ if not interface or 'IP Address' not in interface or 'netmask' not in interface:
+ return
+
+ interface["data"] = "\n".join(interface.pop("lines", None))
+ interface["cidr"] = self.get_cidr_for_vnic(interface)
+ network = self.inv.get_by_field(self.get_env(), "network", "cidrs",
+ interface["cidr"], get_single=True)
+ if not network:
+ return
+ interface["network"] = network["id"]
+ # set network for the vservice, to check network on clique creation
+ vservice = self.inv.get_by_id(self.get_env(),
+ interface["master_parent_id"])
+ network_id = network["id"]
+ if "network" not in vservice:
+ vservice["network"] = list()
+ if network_id not in vservice["network"]:
+ vservice["network"].append(network_id)
+ self.inv.set(vservice)
+
+ # find CIDR string by IP address and netmask
+ def get_cidr_for_vnic(self, vnic):
+ if "IP Address" not in vnic:
+ vnic["IP Address"] = "No IP Address"
+ return "No IP Address"
+ ipaddr = vnic["IP Address"].split('.')
+ netmask = vnic["netmask"].split('.')
+
+ # calculate network start
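+        # (AND each address octet with the matching netmask octet,
+        # e.g. 10.0.0.5 & 255.255.255.0 -> 10.0.0.0)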
+ net_start = []
+ for pos in range(0, 4):
+ net_start.append(str(int(ipaddr[pos]) & int(netmask[pos])))
+
+ cidr_string = '.'.join(net_start) + '/'
+ cidr_string = cidr_string + self.get_net_size(netmask)
+ return cidr_string
+
+ def get_net_size(self, netmask):
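+        # the CIDR prefix length is the number of leading 1 bits in the
+        # netmask, e.g. 255.255.255.0 -> 24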
+ binary_str = ''
+ for octet in netmask:
+ binary_str += bin(int(octet))[2:].zfill(8)
+ return str(len(binary_str.rstrip('0')))
diff --git a/app/discover/fetchers/db/__init__.py b/app/discover/fetchers/db/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/db/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/db/db_access.py b/app/discover/fetchers/db/db_access.py
new file mode 100644
index 0000000..00bd776
--- /dev/null
+++ b/app/discover/fetchers/db/db_access.py
@@ -0,0 +1,142 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import mysql.connector
+
+from discover.configuration import Configuration
+from discover.fetcher import Fetcher
+from utils.string_utils import jsonify
+
+
+class DbAccess(Fetcher):
+ conn = None
+ query_count_per_con = 0
+
+ # connection timeout set to 30 seconds,
+ # due to problems over long connections
+ TIMEOUT = 30
+
+ def __init__(self):
+ super().__init__()
+ self.config = Configuration()
+ self.conf = self.config.get("mysql")
+ self.connect_to_db()
+ cursor = DbAccess.conn.cursor(dictionary=True)
+ try:
+ # check if DB schema 'neutron' exists
+ cursor.execute("SELECT COUNT(*) FROM neutron.agents")
+ for row in cursor:
+ pass
+ self.neutron_db = "neutron"
+ except (AttributeError, mysql.connector.errors.ProgrammingError):
+ self.neutron_db = "ml2_neutron"
+
+ def db_connect(self, _host, _port, _user, _password, _database):
+ if DbAccess.conn:
+ return
+ try:
+ connector = mysql.connector
+ DbAccess.conn = connector.connect(host=_host, port=_port,
+ connection_timeout=self.TIMEOUT,
+ user=_user,
+ password=_password,
+ database=_database,
+ raise_on_warnings=True)
+ DbAccess.conn.ping(True) # auto-reconnect if necessary
+        except Exception:
+ self.log.critical("failed to connect to MySQL DB")
+ return
+ DbAccess.query_count_per_con = 0
+
+ def connect_to_db(self, force=False):
+ if DbAccess.conn:
+ if not force:
+ return
+ self.log.info("DbAccess: ****** forcing reconnect, " +
+ "query count: %s ******",
+ DbAccess.query_count_per_con)
+ DbAccess.conn = None
+ self.conf = self.config.get("mysql")
+ cnf = self.conf
+ cnf['schema'] = cnf['schema'] if 'schema' in cnf else 'nova'
+ self.db_connect(cnf["host"], cnf["port"],
+ cnf["user"], cnf["password"],
+ cnf["schema"])
+
+ def get_objects_list_for_id(self, query, object_type, id):
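+        # force a reconnect after 25 queries on the same connection,
+        # to work around problems seen with long-lived connections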
+ self.connect_to_db(DbAccess.query_count_per_con >= 25)
+ DbAccess.query_count_per_con += 1
+ self.log.debug("query count: %s, running query:\n%s\n",
+ str(DbAccess.query_count_per_con), query)
+
+ cursor = DbAccess.conn.cursor(dictionary=True)
+ try:
+ if id:
+ cursor.execute(query, [str(id)])
+ else:
+ cursor.execute(query)
+ except (AttributeError, mysql.connector.errors.OperationalError) as e:
+ self.log.error(e)
+ self.connect_to_db(True)
+ # try again to run the query
+ cursor = DbAccess.conn.cursor(dictionary=True)
+ if id:
+ cursor.execute(query, [str(id)])
+ else:
+ cursor.execute(query)
+
+ rows = []
+ for row in cursor:
+ rows.append(row)
+ return rows
+
+ def get_objects_list(self, query, object_type):
+ return self.get_objects_list_for_id(query, object_type, None)
+
+ def get_objects(self, qry, type, id):
+ return jsonify(self.get_objects_list(qry, type))
+
+ def get(self, id):
+ # return list of available fetch types
+ ret = {
+ "description": "List of available fetch calls for this interface",
+ "types": {
+ "regions": "Regions of this environment",
+ "projects": "Projects (tenants) of this environment",
+ "availability_zones": "Availability zones",
+ "aggregates": "Host aggregates",
+ "aggregate_hosts": "Hosts in aggregate X (parameter: id)",
+ "az_hosts": "Host in availability_zone X (parameter: id)"
+ }
+ }
+ return jsonify(ret)
+
+ def exec(self, query, table, field, values):
+ try:
+ cursor = DbAccess.conn.cursor(dictionary=True)
+ cursor.execute(query, [table, field, values])
+ except (AttributeError, mysql.connector.errors.OperationalError) as e:
+ self.log.error(e)
+ self.connect_to_db(True)
+ # try again to run the query
+ cursor = DbAccess.conn.cursor(dictionary=True)
+ cursor.execute(query, [table, field, values])
+
+ rows = []
+ for row in cursor:
+ rows.append(row)
+ return rows
+
+ def set(self, table, field, values):
+ query = """INSERT INTO %s %s VALUES %s"""
+ return self.exec(query, table, field, values)
+
+ def delete(self, table, field, values):
+ query = """DELETE FROM %s WHERE %s=%s"""
+ return self.exec(query, table, field, values)
diff --git a/app/discover/fetchers/db/db_fetch_aggregate_hosts.py b/app/discover/fetchers/db/db_fetch_aggregate_hosts.py
new file mode 100644
index 0000000..59ba5d0
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_aggregate_hosts.py
@@ -0,0 +1,36 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import bson
+
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class DbFetchAggregateHosts(DbAccess):
+ def get(self, id):
+ query = """
+ SELECT CONCAT('aggregate-', a.name, '-', host) AS id, host AS name
+ FROM nova.aggregate_hosts ah
+ JOIN nova.aggregates a ON a.id = ah.aggregate_id
+ WHERE ah.deleted = 0 AND aggregate_id = %s
+ """
+ hosts = self.get_objects_list_for_id(query, "host", id)
+ if hosts:
+ inv = InventoryMgr()
+ for host_rec in hosts:
+ host_id = host_rec['name']
+ host = inv.get_by_id(self.get_env(), host_id)
+ if not host:
+ self.log.error('unable to find host {} '
+ 'from aggregate {} in inventory'
+ .format(host_id, id))
+ continue
+ host_rec['ref_id'] = bson.ObjectId(host['_id'])
+ return hosts
diff --git a/app/discover/fetchers/db/db_fetch_aggregates.py b/app/discover/fetchers/db/db_fetch_aggregates.py
new file mode 100644
index 0000000..da0720b
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_aggregates.py
@@ -0,0 +1,21 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_access import DbAccess
+
+
+class DbFetchAggregates(DbAccess):
+ def get(self, id):
+ return self.get_objects_list(
+ """
+ SELECT id, name
+ FROM nova.aggregates
+ WHERE deleted = 0
+ """,
+ "host aggregate")
diff --git a/app/discover/fetchers/db/db_fetch_availability_zones.py b/app/discover/fetchers/db/db_fetch_availability_zones.py
new file mode 100644
index 0000000..763d777
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_availability_zones.py
@@ -0,0 +1,22 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_access import DbAccess
+
+
+class DbFetchAvailabilityZones(DbAccess):
+
+ def get(self, id):
+ query = """
+ SELECT DISTINCT availability_zone,
+ availability_zone AS id, COUNT(DISTINCT host) AS descendants
+ FROM nova.instances
+ WHERE availability_zone IS NOT NULL
+ GROUP BY availability_zone
+ """
+ return self.get_objects_list(query, "availability zone")
diff --git a/app/discover/fetchers/db/db_fetch_az_network_hosts.py b/app/discover/fetchers/db/db_fetch_az_network_hosts.py
new file mode 100644
index 0000000..09043ea
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_az_network_hosts.py
@@ -0,0 +1,31 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from discover.fetchers.db.db_access import DbAccess
+
+
+class DbFetchAZNetworkHosts(DbAccess):
+
+ def get(self, id):
+ query = """
+ SELECT DISTINCT host, host AS id, configurations
+ FROM neutron.agents
+ WHERE agent_type = 'Metadata agent'
+ """
+ results = self.get_objects_list(query, "host")
+ for r in results:
+ self.set_host_details(r)
+ return results
+
+ def set_host_details(self, r):
+ config = json.loads(r["configurations"])
+ r["ip_address"] = config["nova_metadata_ip"]
+ r["host_type"] = "Network Node"
diff --git a/app/discover/fetchers/db/db_fetch_host_instances.py b/app/discover/fetchers/db/db_fetch_host_instances.py
new file mode 100644
index 0000000..2245c4a
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_host_instances.py
@@ -0,0 +1,15 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_fetch_instances import DbFetchInstances
+
+
+class DbFetchHostInstances(DbFetchInstances):
+
+ def get(self, id):
+ return self.get_instances("host", id)
diff --git a/app/discover/fetchers/db/db_fetch_host_network_agents.py b/app/discover/fetchers/db/db_fetch_host_network_agents.py
new file mode 100644
index 0000000..c323573
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_host_network_agents.py
@@ -0,0 +1,35 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class DbFetchHostNetworkAgents(DbAccess):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.env_config = self.config.get_env_config()
+
+ def get(self, id):
+ query = """
+ SELECT * FROM {}.agents
+ WHERE host = %s
+ """.format(self.neutron_db)
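+        # the passed id is '<host id>-network_agents';
+        # strip the suffix to recover the host id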
+ host_id = id[:-1 * len("-network_agents")]
+ results = self.get_objects_list_for_id(query, "network_agent", host_id)
+ mechanism_drivers = self.env_config['mechanism_drivers']
+ id_prefix = mechanism_drivers[0] if mechanism_drivers else 'network_agent'
+ for o in results:
+ o["configurations"] = json.loads(o["configurations"])
+ o["name"] = o["binary"]
+ o['id'] = id_prefix + '-' + o['id']
+ return results
diff --git a/app/discover/fetchers/db/db_fetch_instances.py b/app/discover/fetchers/db/db_fetch_instances.py
new file mode 100644
index 0000000..54c4114
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_instances.py
@@ -0,0 +1,60 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from discover.fetchers.db.db_access import DbAccess
+
+
+class DbFetchInstances(DbAccess):
+ def get_instance_data(self, instances):
+ instances_hash = {}
+ for doc in instances:
+ instances_hash[doc["id"]] = doc
+
+ query = """
+ SELECT DISTINCT i.uuid AS id, i.display_name AS name,
+ i.host AS host, host_ip AS ip_address,
+ network_info, project_id,
+ IF(p.name IS NULL, "Unknown", p.name) AS project
+ FROM nova.instances i
+ LEFT JOIN keystone.project p ON p.id = i.project_id
+ JOIN nova.instance_info_caches ic ON i.uuid = ic.instance_uuid
+ JOIN nova.compute_nodes cn ON i.node = cn.hypervisor_hostname
+ WHERE i.deleted = 0
+ """
+ results = self.get_objects_list(query, "instance")
+ for result in results:
+ id = result["id"]
+ if id not in instances_hash:
+ continue
+ self.build_instance_details(result)
+ doc = instances_hash[id]
+ doc.update(result)
+
+ def build_instance_details(self, result):
+ network_info_str = result.pop("network_info", None)
+ result["network_info"] = json.loads(network_info_str)
+
+ # add network as an array to allow constraint checking
+ # when building clique
+ networks = []
+ for net in result["network_info"]:
+ if "network" not in net or "id" not in net["network"]:
+ continue
+ network_id = net["network"]["id"]
+ if network_id in networks:
+ continue
+ networks.append(network_id)
+ result["network"] = networks
+
+ result["type"] = "instance"
+ result["parent_type"] = "instances_folder"
+ result["parent_id"] = result["host"] + "-instances"
+ result["in_project-" + result.pop("project", None)] = "1"
diff --git a/app/discover/fetchers/db/db_fetch_oteps.py b/app/discover/fetchers/db/db_fetch_oteps.py
new file mode 100644
index 0000000..9055c11
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_oteps.py
@@ -0,0 +1,81 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.cli.cli_access import CliAccess
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+from utils.singleton import Singleton
+
+
+class DbFetchOteps(DbAccess, CliAccess, metaclass=Singleton):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.port_re = re.compile("^\s*port (\d+): ([^(]+)( \(internal\))?$")
+
+ def get(self, id):
+ vedge = self.inv.get_by_id(self.get_env(), id)
+ tunnel_type = None
+ if "configurations" not in vedge:
+ return []
+ if "tunnel_types" not in vedge["configurations"]:
+ return []
+ if not vedge["configurations"]["tunnel_types"]:
+ return []
+ tunnel_type = vedge["configurations"]["tunnel_types"][0]
+ host_id = vedge["host"]
+ table_name = "neutron.ml2_" + tunnel_type + "_endpoints"
+ env_config = self.config.get_env_config()
+ distribution = env_config["distribution"]
+ if distribution == "Canonical-icehouse":
+ # for Icehouse, we only get IP address from the DB, so take the
+            # host IP address from the host data in Mongo
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ results = [{"host": host_id, "ip_address": host["ip_address"]}]
+ else:
+ results = self.get_objects_list_for_id(
+ """
+ SELECT *
+ FROM {}
+ WHERE host = %s
+ """.format(table_name),
+ "vedge", host_id)
+ for doc in results:
+ doc["id"] = host_id + "-otep"
+ doc["name"] = doc["id"]
+ doc["host"] = host_id
+ doc["overlay_type"] = tunnel_type
+ doc["ports"] = vedge["tunnel_ports"] if "tunnel_ports" in vedge else []
+ if "udp_port" not in doc:
+ doc["udp_port"] = "67"
+ self.get_vconnector(doc, host_id, vedge)
+
+ return results
+
+ # find matching vConnector by tunneling_ip of vEdge
+ # look for that IP address in ifconfig for the host
+ def get_vconnector(self, doc, host_id, vedge):
+ tunneling_ip = vedge["configurations"]["tunneling_ip"]
+ ifconfig_lines = self.run_fetch_lines("ifconfig", host_id)
+ interface = None
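+        # look for an indented 'inet addr:<tunneling ip>' line beneath an
+        # interface header; the interface above it is the matching vConnector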
+ ip_string = " " * 10 + "inet addr:" + tunneling_ip + " "
+ vconnector = None
+ for l in ifconfig_lines:
+ if l.startswith(" "):
+ if interface and l.startswith(ip_string):
+ vconnector = interface
+ break
+ else:
+ if " " in l:
+ interface = l[:l.index(" ")]
+
+ if vconnector:
+ doc["vconnector"] = vconnector
diff --git a/app/discover/fetchers/db/db_fetch_port.py b/app/discover/fetchers/db/db_fetch_port.py
new file mode 100644
index 0000000..2cb814a
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_port.py
@@ -0,0 +1,34 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class DbFetchPort(DbAccess):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.env_config = self.config.get_env_config()
+
+ def get(self, id=None):
+ query = """SELECT * FROM {}.ports where network_id = %s""" \
+ .format(self.neutron_db)
+ return self.get_objects_list_for_id(query, "port", id)
+
+ def get_id(self, id=None):
+ query = """SELECT id FROM {}.ports where network_id = %s""" \
+ .format(self.neutron_db)
+ result = self.get_objects_list_for_id(query, "port", id)
+ return result[0]['id'] if result != [] else None
+
+ def get_id_by_field(self, id, search=''):
+ query = """SELECT id FROM neutron.ports where network_id = %s AND """ + search
+ result = self.get_objects_list_for_id(query, "port", id)
+        return result[0]['id'] if result != [] else None
\ No newline at end of file
diff --git a/app/discover/fetchers/db/db_fetch_vedges_ovs.py b/app/discover/fetchers/db/db_fetch_vedges_ovs.py
new file mode 100644
index 0000000..24cc9f8
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_vedges_ovs.py
@@ -0,0 +1,178 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+import re
+
+from discover.fetchers.cli.cli_access import CliAccess
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+from utils.singleton import Singleton
+
+
+class DbFetchVedgesOvs(DbAccess, CliAccess, metaclass=Singleton):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
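+        # matches port lines in 'ovs-dpctl show' output,
+        # e.g. "  port 2: tap1234abcd-56 (internal)"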
+ self.port_re = re.compile("^\s*port (\d+): ([^(]+)( \(internal\))?$")
+ self.port_line_header_prefix = " " * 8 + "Port "
+
+ def get(self, id):
+ host_id = id[:id.rindex('-')]
+ results = self.get_objects_list_for_id(
+ """
+ SELECT *
+ FROM {}.agents
+ WHERE host = %s AND agent_type = 'Open vSwitch agent'
+ """.format(self.neutron_db),
+ "vedge", host_id)
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ if not host:
+ self.log.error("unable to find host in inventory: %s", host_id)
+ return []
+ host_types = host["host_type"]
+ if "Network" not in host_types and "Compute" not in host_types:
+ return []
+ vsctl_lines = self.run_fetch_lines("ovs-vsctl show", host["id"])
+ ports = self.fetch_ports(host, vsctl_lines)
+ for doc in results:
+ doc["name"] = doc["host"] + "-OVS"
+ doc["configurations"] = json.loads(doc["configurations"])
+ doc["ports"] = ports
+ doc["tunnel_ports"] = self.get_overlay_tunnels(doc, vsctl_lines)
+ return results
+
+ def fetch_ports(self, host, vsctl_lines):
+ host_types = host["host_type"]
+ if "Network" not in host_types and "Compute" not in host_types:
+ return {}
+ ports = self.fetch_ports_from_dpctl(host["id"])
+ self.fetch_port_tags_from_vsctl(vsctl_lines, ports)
+ return ports
+
+ def fetch_ports_from_dpctl(self, host_id):
+ cmd = "ovs-dpctl show"
+ lines = self.run_fetch_lines(cmd, host_id)
+ ports = {}
+ for l in lines:
+ port_matches = self.port_re.match(l)
+ if not port_matches:
+ continue
+ port = {}
+ id = port_matches.group(1)
+ name = port_matches.group(2)
+ is_internal = port_matches.group(3) == " (internal)"
+ port["internal"] = is_internal
+ port["id"] = id
+ port["name"] = name
+ ports[name] = port
+ return ports
+
+ # from ovs-vsctl, fetch tags of ports
+ # example format of ovs-vsctl output for a specific port:
+ # Port "tap9f94d28e-7b"
+ # tag: 5
+ # Interface "tap9f94d28e-7b"
+ # type: internal
+ def fetch_port_tags_from_vsctl(self, vsctl_lines, ports):
+ port = None
+ for l in vsctl_lines:
+ if l.startswith(self.port_line_header_prefix):
+ port = None
+ port_name = l[len(self.port_line_header_prefix):]
+ # remove quotes from port name
+ if '"' in port_name:
+ port_name = port_name[1:][:-1]
+ if port_name in ports:
+ port = ports[port_name]
+ continue
+ if not port:
+ continue
+ if l.startswith(" " * 12 + "tag: "):
+ port["tag"] = l[l.index(":") + 2:]
+ ports[port["name"]] = port
+ return ports
+
+ def get_overlay_tunnels(self, doc, vsctl_lines):
+ if doc["agent_type"] != "Open vSwitch agent":
+ return {}
+ if "tunneling_ip" not in doc["configurations"]:
+ return {}
+ if not doc["configurations"]["tunneling_ip"]:
+ self.get_bridge_pnic(doc)
+ return {}
+
+ # read the 'br-tun' interface ports
+ # this will be used later in the OTEP
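+        # illustrative 'ovs-vsctl show' layout under 'Bridge br-tun'
+        # (Port at 8 spaces, Interface at 12, attributes at 16):
+        #        Port "vxlan-0a000a02"
+        #            Interface "vxlan-0a000a02"
+        #                type: vxlan
+        #                options: {in_key=flow, local_ip="10.0.10.1", out_key=flow, remote_ip="10.0.10.2"}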
+ tunnel_bridge_header = " " * 4 + "Bridge br-tun"
+ try:
+ br_tun_loc = vsctl_lines.index(tunnel_bridge_header)
+ except ValueError:
+            return {}
+ lines = vsctl_lines[br_tun_loc + 1:]
+ tunnel_ports = {}
+ port = None
+ for l in lines:
+            # if the line is indented by 4 spaces or fewer,
+            # the br-tun section has ended, so stop
+ if not l.startswith(" " * 5):
+ break
+ if l.startswith(self.port_line_header_prefix):
+ if port:
+ tunnel_ports[port["name"]] = port
+ name = l[len(self.port_line_header_prefix):].strip('" ')
+ port = {"name": name}
+ elif port and l.startswith(" " * 12 + "Interface "):
+ interface = l[10 + len("Interface ") + 1:].strip('" ')
+ port["interface"] = interface
+ elif port and l.startswith(" " * 16):
+ colon_pos = l.index(":")
+ attr = l[:colon_pos].strip()
+ val = l[colon_pos + 2:].strip('" ')
+ if attr == "options":
+ opts = val.strip('{}')
+ val = {}
+ for opt in opts.split(", "):
+ opt_name = opt[:opt.index("=")]
+ opt_val = opt[opt.index("=") + 1:].strip('" ')
+ val[opt_name] = opt_val
+ port[attr] = val
+ if port:
+ tunnel_ports[port["name"]] = port
+ return tunnel_ports
+
+ def get_bridge_pnic(self, doc):
+ conf = doc["configurations"]
+ if "bridge_mappings" not in conf or not conf["bridge_mappings"]:
+ return
+        br = None
+        for v in conf["bridge_mappings"].values():
+            br = v
+ ifaces_list_lines = self.run_fetch_lines("ovs-vsctl list-ifaces " + br,
+ doc["host"])
+ br_pnic_postfix = br + "--br-"
+ interface = ""
+ for l in ifaces_list_lines:
+ if l.startswith(br_pnic_postfix):
+ interface = l[len(br_pnic_postfix):]
+ break
+ if not interface:
+ return
+ doc["pnic"] = interface
+ # add port ID to pNIC
+ pnic = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "pnic",
+ "host": doc["host"],
+ "name": interface
+ }, get_single=True)
+ if not pnic:
+ return
+ port = doc["ports"][interface]
+ pnic["port_id"] = port["id"]
+ self.inv.set(pnic)
diff --git a/app/discover/fetchers/db/db_fetch_vedges_vpp.py b/app/discover/fetchers/db/db_fetch_vedges_vpp.py
new file mode 100644
index 0000000..a1c659e
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_vedges_vpp.py
@@ -0,0 +1,56 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_access import CliAccess
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+from utils.singleton import Singleton
+
+
+class DbFetchVedgesVpp(DbAccess, CliAccess, metaclass=Singleton):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, id):
+ host_id = id[:id.rindex('-')]
+ vedge = {
+ 'host': host_id,
+ 'id': host_id + '-VPP',
+ 'name': 'VPP-' + host_id,
+ 'agent_type': 'VPP'
+ }
+ ver = self.run_fetch_lines('vppctl show ver', host_id)
+ if ver:
+ ver = ver[0]
+ vedge['binary'] = ver[:ver.index(' ', ver.index(' ') + 1)]
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ if not host:
+ self.log.error("unable to find host in inventory: %s", host_id)
+ return []
+ host_types = host["host_type"]
+ if "Network" not in host_types and "Compute" not in host_types:
+ return []
+ interfaces = self.run_fetch_lines('vppctl show int', host_id)
+ vedge['ports'] = self.fetch_ports(interfaces)
+ return [vedge]
+
+ def fetch_ports(self, interfaces):
+ ports = {}
+ for i in interfaces:
+ if not i or i.startswith(' '):
+ continue
+ parts = i.split()
+ port = {
+ 'id': parts[1],
+ 'state': parts[2],
+ 'name': parts[0]
+ }
+ ports[port['name']] = port
+ return ports
diff --git a/app/discover/fetchers/folder_fetcher.py b/app/discover/fetchers/folder_fetcher.py
new file mode 100644
index 0000000..e7bb1fa
--- /dev/null
+++ b/app/discover/fetchers/folder_fetcher.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetcher import Fetcher
+from utils.string_utils import jsonify
+
+
+class FolderFetcher(Fetcher):
+ def __init__(self, types_name, parent_type, text="", create_folder=True):
+ super(FolderFetcher, self).__init__()
+ self.types_name = types_name
+ self.parent_type = parent_type
+ self.text = text
+ self.create_folder = create_folder
+ if not self.text:
+ self.text = self.types_name.capitalize()
+
+ def get(self, id):
+ oid = id + "-" + self.types_name
+ root_obj = {
+ "id": oid,
+ "create_object": self.create_folder,
+ "name": oid,
+ "text": self.text,
+ "type": self.types_name + "_folder",
+ "parent_id": id,
+ "parent_type": self.parent_type
+ }
+ return jsonify([root_obj])
diff --git a/app/discover/find_links.py b/app/discover/find_links.py
new file mode 100644
index 0000000..0967a60
--- /dev/null
+++ b/app/discover/find_links.py
@@ -0,0 +1,30 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetcher import Fetcher
+from utils.inventory_mgr import InventoryMgr
+
+
+class FindLinks(Fetcher):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ def create_link(self, env, host, source, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ source_label="", target_label="",
+ extra_attributes=None):
+ if extra_attributes is None:
+ extra_attributes = {}
+ link = self.inv.create_link(env, host,
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ extra_attributes=extra_attributes)
+ if self.inv.monitoring_setup_manager:
+ self.inv.monitoring_setup_manager.create_setup(link)
diff --git a/app/discover/find_links_for_instance_vnics.py b/app/discover/find_links_for_instance_vnics.py
new file mode 100644
index 0000000..7e081fc
--- /dev/null
+++ b/app/discover/find_links_for_instance_vnics.py
@@ -0,0 +1,59 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
+class FindLinksForInstanceVnics(FindLinks):
+ def __init__(self):
+ super().__init__()
+
+ def add_links(self):
+ self.log.info("adding links of type: instance-vnic")
+ vnics = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "vnic",
+ "vnic_type": "instance_vnic"
+ })
+ for v in vnics:
+ self.add_link_for_vnic(v)
+
+ def add_link_for_vnic(self, v):
+ instance = self.inv.get_by_id(self.get_env(), v["instance_id"])
+ if "network_info" not in instance:
+ self.log.warn("add_link_for_vnic: " +
+ "network_info missing in instance: %s ",
+ instance["id"])
+ return
+ host = self.inv.get_by_id(self.get_env(), instance["host"])
+ host_types = host["host_type"]
+ if "Network" not in host_types and "Compute" not in host_types:
+ return []
+ source = instance["_id"]
+ source_id = instance["id"]
+ target = v["_id"]
+ target_id = v["id"]
+ link_type = "instance-vnic"
+ # find related network
+ network_name = None
+ network_id = None
+ for net in instance["network_info"]:
+ if net["devname"] == v["id"]:
+ network_name = net["network"]["label"]
+ network_id = net['network']['id']
+ v['network'] = network_id
+ self.inv.set(v)
+ break
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ attributes = {} if not network_id else {'network': network_id}
+ self.create_link(self.get_env(), host["name"],
+ source, source_id, target, target_id,
+ link_type, network_name, state, link_weight,
+ extra_attributes=attributes)
diff --git a/app/discover/find_links_for_oteps.py b/app/discover/find_links_for_oteps.py
new file mode 100644
index 0000000..84373a4
--- /dev/null
+++ b/app/discover/find_links_for_oteps.py
@@ -0,0 +1,85 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
+class FindLinksForOteps(FindLinks):
+ def __init__(self):
+ super().__init__()
+
+ def add_links(self):
+ self.log.info("adding link types: " +
+ "vedge-otep, otep-vconnector, otep-pnic")
+ oteps = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "otep"
+ })
+ for otep in oteps:
+ self.add_vedge_otep_link(otep)
+ self.add_otep_vconnector_link(otep)
+ self.add_otep_pnic_link(otep)
+
+ def add_vedge_otep_link(self, otep):
+ vedge = self.inv.get_by_id(self.get_env(), otep["parent_id"])
+ source = vedge["_id"]
+ source_id = vedge["id"]
+ target = otep["_id"]
+ target_id = otep["id"]
+ link_type = "vedge-otep"
+ link_name = vedge["name"] + "-otep"
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ self.create_link(self.get_env(), vedge["host"],
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight)
+
+ def add_otep_vconnector_link(self, otep):
+ if "vconnector" not in otep:
+ return
+ vconnector = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "vconnector",
+ "host": otep["host"],
+ "name": otep["vconnector"]
+ }, get_single=True)
+ if not vconnector:
+ return
+ source = otep["_id"]
+ source_id = otep["id"]
+ target = vconnector["_id"]
+ target_id = vconnector["id"]
+ link_type = "otep-vconnector"
+ link_name = otep["name"] + "-" + otep["vconnector"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ self.create_link(self.get_env(), otep["host"],
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight)
+
+ def add_otep_pnic_link(self, otep):
+ pnic = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "pnic",
+ "host": otep["host"],
+ "IP Address": otep["ip_address"]
+ }, get_single=True)
+ if not pnic:
+ return
+ source = otep["_id"]
+ source_id = otep["id"]
+ target = pnic["_id"]
+ target_id = pnic["id"]
+ link_type = "otep-pnic"
+        link_name = otep["host"] + "-pnic-" + pnic["name"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ self.create_link(self.get_env(), otep["host"],
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight)
diff --git a/app/discover/find_links_for_pnics.py b/app/discover/find_links_for_pnics.py
new file mode 100644
index 0000000..19828d0
--- /dev/null
+++ b/app/discover/find_links_for_pnics.py
@@ -0,0 +1,58 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
+class FindLinksForPnics(FindLinks):
+ def __init__(self):
+ super().__init__()
+
+ def add_links(self):
+ pnics = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "pnic",
+ "pnic_type": {"$ne": "switch"} # TODO: make a more educated guess
+ })
+ for pnic in pnics:
+ self.add_pnic_network_links(pnic)
+
+ def add_pnic_network_links(self, pnic):
+ self.log.info("adding links of type: pnic-network")
+ host = pnic["host"]
+ # find ports for that host, and fetch just the network ID
+ ports = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "port",
+ "binding:host_id": host
+ }, {"network_id": 1, "id": 1})
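+        # collect the distinct network IDs used by these ports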
+ networks = {}
+ for port in ports:
+ networks[port["network_id"]] = 1
+ for network_id in networks.keys():
+ network = self.inv.get_by_id(self.get_env(), network_id)
+            if not network:
+                continue
+ source = pnic["_id"]
+ source_id = pnic["id"]
+ target = network["_id"]
+ target_id = network["id"]
+ link_type = "pnic-network"
+ link_name = "Segment-" + str(network["provider:segmentation_id"]) \
+ if "provider:segmentation_id" in network \
+ else "Segment-None"
+ state = "up" if pnic["Link detected"] == "yes" else "down"
+ link_weight = 0 # TBD
+ source_label = "port-" + pnic["port_id"] if "port_id" in pnic \
+ else ""
+ self.create_link(self.get_env(), host,
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ source_label,
+ extra_attributes={"network": target_id})
diff --git a/app/discover/find_links_for_vconnectors.py b/app/discover/find_links_for_vconnectors.py
new file mode 100644
index 0000000..3d5cdb0
--- /dev/null
+++ b/app/discover/find_links_for_vconnectors.py
@@ -0,0 +1,88 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
+class FindLinksForVconnectors(FindLinks):
+ def __init__(self):
+ super().__init__()
+
+ def add_links(self):
+ vconnectors = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "vconnector"
+ })
+ self.log.info("adding links of type: vnic-vconnector, vconnector-pnic")
+ for vconnector in vconnectors:
+ for interface in vconnector["interfaces_names"]:
+ self.add_vnic_vconnector_link(vconnector, interface)
+ self.add_vconnector_pnic_link(vconnector, interface)
+
+ def add_vnic_vconnector_link(self, vconnector, interface_name):
+ mechanism_drivers = self.configuration.environment['mechanism_drivers']
+ is_ovs = mechanism_drivers and mechanism_drivers[0] == 'OVS'
+ if is_ovs:
+ # interface ID for OVS
+ vnic = self.inv.get_by_id(self.get_env(), interface_name)
+ else:
+ # interface ID for VPP - match interface MAC address to vNIC MAC
+ interface = vconnector['interfaces'][interface_name]
+ if not interface or 'mac_address' not in interface:
+ return
+ vnic_mac = interface['mac_address']
+ vnic = self.inv.get_by_field(self.get_env(), 'vnic',
+ 'mac_address', vnic_mac,
+ get_single=True)
+ if not vnic:
+ return
+ host = vnic["host"]
+ source = vnic["_id"]
+ source_id = vnic["id"]
+ target = vconnector["_id"]
+ target_id = vconnector["id"]
+ link_type = "vnic-vconnector"
+ link_name = vnic["mac_address"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
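+        # propagate the vNIC's network both to the link attributes and to
+        # the vConnector document itself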
+ attributes = {}
+ if 'network' in vnic:
+ attributes = {'network': vnic['network']}
+ vconnector['network'] = vnic['network']
+ self.inv.set(vconnector)
+ self.create_link(self.get_env(), host,
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ extra_attributes=attributes)
+
+ def add_vconnector_pnic_link(self, vconnector, interface):
+ ifname = interface['name'] if isinstance(interface, dict) else interface
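+        # strip a VLAN tag suffix (e.g. "eth0.100" -> "eth0") so the
+        # lookup matches the parent pNIC name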
+ if "." in ifname:
+ ifname = ifname[:ifname.index(".")]
+ host = vconnector["host"]
+ pnic = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "pnic",
+ "host": vconnector["host"],
+ "name": ifname
+ }, get_single=True)
+ if not pnic:
+ return
+ source = vconnector["_id"]
+ source_id = vconnector["id"]
+ target = pnic["_id"]
+ target_id = pnic["id"]
+ link_type = "vconnector-pnic"
+ link_name = pnic["name"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ self.create_link(self.get_env(), host,
+ source, source_id,
+ target, target_id,
+ link_type, link_name, state, link_weight)
diff --git a/app/discover/find_links_for_vedges.py b/app/discover/find_links_for_vedges.py
new file mode 100644
index 0000000..1235074
--- /dev/null
+++ b/app/discover/find_links_for_vedges.py
@@ -0,0 +1,124 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
+class FindLinksForVedges(FindLinks):
+ def __init__(self):
+ super().__init__()
+
+ def add_links(self):
+ self.log.info("adding link types: " +
+ "vnic-vedge, vconnector-vedge, vedge-pnic")
+ vedges = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "vedge"
+ })
+ for vedge in vedges:
+ ports = vedge["ports"]
+ for p in ports.values():
+ self.add_link_for_vedge(vedge, p)
+
+ def add_link_for_vedge(self, vedge, port):
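+        # vNIC documents are keyed by "<host>-<port name>"; if no vNIC
+        # matches this port, it may connect to a vConnector or a pNIC instead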
+ vnic = self.inv.get_by_id(self.get_env(),
+ vedge['host'] + '-' + port["name"])
+ if not vnic:
+ self.find_matching_vconnector(vedge, port)
+ self.find_matching_pnic(vedge, port)
+ return
+ source = vnic["_id"]
+ source_id = vnic["id"]
+ target = vedge["_id"]
+ target_id = vedge["id"]
+ link_type = "vnic-vedge"
+ link_name = vnic["name"] + "-" + vedge["name"]
+ if "tag" in port:
+ link_name += "-" + port["tag"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ source_label = vnic["mac_address"]
+ target_label = port["id"]
+ self.create_link(self.get_env(), vedge["host"],
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ source_label, target_label)
+
+ def find_matching_vconnector(self, vedge, port):
+ if self.configuration.has_network_plugin('VPP'):
+ vconnector_interface_name = port['name']
+ else:
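+            # with OVS, a vEdge port (typically "qvo<id>") pairs with the
+            # vConnector interface "qvb<id>" on the other end of the veth pair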
+ if not port["name"].startswith("qv"):
+ return
+ base_id = port["name"][3:]
+ vconnector_interface_name = "qvb" + base_id
+ vconnector = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "vconnector",
+ "host": vedge['host'],
+ 'interfaces_names': vconnector_interface_name},
+ get_single=True)
+ if not vconnector:
+ return
+ source = vconnector["_id"]
+ source_id = vconnector["id"]
+ target = vedge["_id"]
+ target_id = vedge["id"]
+ link_type = "vconnector-vedge"
+ link_name = "port-" + port["id"]
+ if "tag" in port:
+ link_name += "-" + port["tag"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ source_label = vconnector_interface_name
+ target_label = port["name"]
+ mac_address = "Unknown"
+ attributes = {'mac_address': mac_address}
+ for interface in vconnector['interfaces'].values():
+ if vconnector_interface_name != interface['name']:
+ continue
+ if 'mac_address' not in interface:
+ continue
+ mac_address = interface['mac_address']
+ attributes['mac_address'] = mac_address
+ break
+ if 'network' in vconnector:
+ attributes['network'] = vconnector['network']
+ self.create_link(self.get_env(), vedge["host"],
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ source_label, target_label,
+ attributes)
+
+ def find_matching_pnic(self, vedge, port):
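+        # if the vEdge names its pNIC explicitly, only a port with that name
+        # qualifies; the pNIC is then looked up by host and port name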
+ pname = port["name"]
+ if "pnic" in vedge:
+ if pname != vedge["pnic"]:
+ return
+ elif self.configuration.has_network_plugin('VPP'):
+ pass
+ pnic = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "pnic",
+ "host": vedge["host"],
+ "name": pname
+ }, get_single=True)
+ if not pnic:
+ return
+ source = vedge["_id"]
+ source_id = vedge["id"]
+ target = pnic["_id"]
+ target_id = pnic["id"]
+ link_type = "vedge-pnic"
+ link_name = "Port-" + port["id"]
+ state = "up" if pnic["Link detected"] == "yes" else "down"
+ link_weight = 0 # TBD
+ self.create_link(self.get_env(), vedge["host"],
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight)
diff --git a/app/discover/find_links_for_vservice_vnics.py b/app/discover/find_links_for_vservice_vnics.py
new file mode 100644
index 0000000..e8a91c8
--- /dev/null
+++ b/app/discover/find_links_for_vservice_vnics.py
@@ -0,0 +1,56 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
+class FindLinksForVserviceVnics(FindLinks):
+ def __init__(self):
+ super().__init__()
+
+ def add_links(self, search=None):
+ self.log.info("adding links of type: vservice-vnic")
+
+ if search is None:
+ search = {}
+
+ search.update({"environment": self.get_env(),
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"})
+
+ vnics = self.inv.find_items(search)
+
+ for v in vnics:
+ self.add_link_for_vnic(v)
+
+ def add_link_for_vnic(self, v):
+ host = self.inv.get_by_id(self.get_env(), v["host"])
+ if "Network" not in host["host_type"]:
+ return
+ if "network" not in v:
+ return
+ network = self.inv.get_by_id(self.get_env(), v["network"])
+        if not network:
+ return
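+        # the vNIC's parent_id ends with a folder suffix; strip the part
+        # after the last '-' to get the owning vService ID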
+ vservice_id = v["parent_id"]
+ vservice_id = vservice_id[:vservice_id.rindex('-')]
+ vservice = self.inv.get_by_id(self.get_env(), vservice_id)
+ source = vservice["_id"]
+ source_id = vservice_id
+ target = v["_id"]
+ target_id = v["id"]
+ link_type = "vservice-vnic"
+ link_name = network["name"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ self.create_link(self.get_env(), v["host"],
+ source, source_id,
+ target, target_id,
+ link_type, link_name, state, link_weight,
+ extra_attributes={'network': v['network']})
diff --git a/app/discover/manager.py b/app/discover/manager.py
new file mode 100644
index 0000000..e37bb31
--- /dev/null
+++ b/app/discover/manager.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from abc import ABC, abstractmethod
+
+from utils.logging.file_logger import FileLogger
+from utils.logging.full_logger import FullLogger
+from utils.mongo_access import MongoAccess
+
+
+class Manager(ABC):
+
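+    # common base class for long-running managers (e.g. the scan manager):
+    # subclasses implement configure() and do_action(), driven by run()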
+ MIN_INTERVAL = 0.1 # To prevent needlessly frequent scans
+
+ def __init__(self, log_directory: str = None,
+ mongo_config_file: str = None):
+ super().__init__()
+ if log_directory:
+ FileLogger.LOG_DIRECTORY = log_directory
+ MongoAccess.config_file = mongo_config_file
+ self.log = FullLogger()
+ self.conf = None
+ self.inv = None
+ self.collection = None
+ self._update_document = None
+ self.interval = self.MIN_INTERVAL
+
+ @abstractmethod
+ def configure(self):
+ pass
+
+ @abstractmethod
+ def do_action(self):
+ pass
+
+ def run(self):
+ self.configure()
+ self.do_action()
diff --git a/app/discover/monitoring_mgr.py b/app/discover/monitoring_mgr.py
new file mode 100644
index 0000000..f3f737f
--- /dev/null
+++ b/app/discover/monitoring_mgr.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# moved
diff --git a/app/discover/network_agents_list.py b/app/discover/network_agents_list.py
new file mode 100644
index 0000000..c1c1b36
--- /dev/null
+++ b/app/discover/network_agents_list.py
@@ -0,0 +1,23 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from utils.mongo_access import MongoAccess
+
+
+class NetworkAgentsList(MongoAccess):
+ def __init__(self):
+ super(NetworkAgentsList, self).__init__()
+ self.list = MongoAccess.db["network_agent_types"]
+
+ def get_type(self, type):
+ matches = self.list.find({"type": type})
+ for doc in matches:
+ doc["_id"] = str(doc["_id"])
+ return doc
+ return {}
diff --git a/app/discover/plugins/__init__.py b/app/discover/plugins/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/discover/plugins/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/discover/scan.py b/app/discover/scan.py
new file mode 100755
index 0000000..72184ec
--- /dev/null
+++ b/app/discover/scan.py
@@ -0,0 +1,324 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+# Scan an object and insert/update in the inventory
+
+# phase 2: either scan default environment, or scan specific object
+
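+# example invocation (hypothetical config file and environment names):
+#   python3 scan.py -m mongo_access.conf -e MyEnv --inventory_only
+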
+import argparse
+import sys
+
+from discover.configuration import Configuration
+from discover.fetcher import Fetcher
+from discover.scan_error import ScanError
+from discover.scanner import Scanner
+from monitoring.setup.monitoring_setup_manager import MonitoringSetupManager
+from utils.constants import EnvironmentFeatures
+from utils.mongo_access import MongoAccess
+from utils.exceptions import ScanArgumentsError
+from utils.inventory_mgr import InventoryMgr
+from utils.ssh_connection import SshConnection
+from utils.util import setup_args
+
+
+class ScanPlan:
+ """
+ @DynamicAttrs
+ """
+
+ # Each tuple of COMMON_ATTRIBUTES consists of:
+ # attr_name, arg_name and def_key
+ #
+ # attr_name - name of class attribute to be set
+ # arg_name - corresponding name of argument (equal to attr_name if not set)
+ # def_key - corresponding key in DEFAULTS (equal to attr_name if not set)
+ COMMON_ATTRIBUTES = (("loglevel",),
+ ("inventory_only",),
+ ("links_only",),
+ ("cliques_only",),
+ ("monitoring_setup_only",),
+ ("clear",),
+ ("clear_all",),
+ ("object_type", "type", "type"),
+ ("env",),
+ ("object_id", "id", "env"),
+ ("parent_id",),
+ ("type_to_scan", "parent_type", "parent_type"),
+ ("id_field",),
+ ("scan_self",),
+ ("child_type", "type", "type"))
+
+ def __init__(self, args=None):
+ self.obj = None
+ self.scanner_type = None
+ self.args = args
+ for attribute in self.COMMON_ATTRIBUTES:
+ setattr(self, attribute[0], None)
+
+ if isinstance(args, dict):
+ self._init_from_dict()
+ else:
+ self._init_from_args()
+ self._validate_args()
+
+ def _validate_args(self):
+ errors = []
+ if (self.inventory_only and self.links_only) \
+ or (self.inventory_only and self.cliques_only) \
+ or (self.links_only and self.cliques_only):
+ errors.append("Only one of (inventory_only, links_only, "
+ "cliques_only) can be True.")
+ if errors:
+ raise ScanArgumentsError("\n".join(errors))
+
+ def _set_arg_from_dict(self, attribute_name, arg_name=None,
+ default_key=None):
+ default_attr = default_key if default_key else attribute_name
+ setattr(self, attribute_name,
+ self.args.get(arg_name if arg_name else attribute_name,
+ ScanController.DEFAULTS[default_attr]))
+
+ def _set_arg_from_cmd(self, attribute_name, arg_name=None):
+ setattr(self,
+ attribute_name,
+ getattr(self.args, arg_name if arg_name else attribute_name))
+
+ def _set_arg_from_form(self, attribute_name, arg_name=None,
+ default_key=None):
+ default_attr = default_key if default_key else attribute_name
+ setattr(self,
+ attribute_name,
+ self.args.getvalue(arg_name if arg_name else attribute_name,
+ ScanController.DEFAULTS[default_attr]))
+
+ def _init_from_dict(self):
+ for arg in self.COMMON_ATTRIBUTES:
+ self._set_arg_from_dict(*arg)
+ self.child_id = None
+
+ def _init_from_args(self):
+ for arg in self.COMMON_ATTRIBUTES:
+ self._set_arg_from_cmd(*arg[:2])
+ self.child_id = None
+
+
+class ScanController(Fetcher):
+ DEFAULTS = {
+ "env": "",
+ "mongo_config": "",
+ "type": "",
+ "inventory": "inventory",
+ "scan_self": False,
+ "parent_id": "",
+ "parent_type": "",
+ "id_field": "id",
+ "loglevel": "INFO",
+ "inventory_only": False,
+ "links_only": False,
+ "cliques_only": False,
+ "monitoring_setup_only": False,
+ "clear": False,
+ "clear_all": False
+ }
+
+ def __init__(self):
+ super().__init__()
+ self.conf = None
+ self.inv = None
+
+ def get_args(self):
+ # try to read scan plan from command line parameters
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
+ default=self.DEFAULTS["mongo_config"],
+ help="name of config file " +
+ "with MongoDB server access details")
+ parser.add_argument("-e", "--env", nargs="?", type=str,
+ default=self.DEFAULTS["env"],
+ help="name of environment to scan \n"
+ "(default: " + self.DEFAULTS["env"] + ")")
+ parser.add_argument("-t", "--type", nargs="?", type=str,
+ default=self.DEFAULTS["type"],
+ help="type of object to scan \n"
+ "(default: environment)")
+ parser.add_argument("-y", "--inventory", nargs="?", type=str,
+ default=self.DEFAULTS["inventory"],
+ help="name of inventory collection \n"
+ "(default: 'inventory')")
+ parser.add_argument("-s", "--scan_self", action="store_true",
+ help="scan changes to a specific object \n"
+ "(default: False)")
+ parser.add_argument("-i", "--id", nargs="?", type=str,
+ default=self.DEFAULTS["env"],
+ help="ID of object to scan (when scan_self=true)")
+ parser.add_argument("-p", "--parent_id", nargs="?", type=str,
+ default=self.DEFAULTS["parent_id"],
+ help="ID of parent object (when scan_self=true)")
+ parser.add_argument("-a", "--parent_type", nargs="?", type=str,
+ default=self.DEFAULTS["parent_type"],
+ help="type of parent object (when scan_self=true)")
+ parser.add_argument("-f", "--id_field", nargs="?", type=str,
+ default=self.DEFAULTS["id_field"],
+ help="name of ID field (when scan_self=true) \n"
+ "(default: 'id', use 'name' for projects)")
+ parser.add_argument("-l", "--loglevel", nargs="?", type=str,
+ default=self.DEFAULTS["loglevel"],
+ help="logging level \n(default: '{}')"
+ .format(self.DEFAULTS["loglevel"]))
+ parser.add_argument("--clear", action="store_true",
+ help="clear all data related to "
+ "the specified environment prior to scanning\n"
+ "(default: False)")
+ parser.add_argument("--clear_all", action="store_true",
+ help="clear all data prior to scanning\n"
+ "(default: False)")
+ parser.add_argument("--monitoring_setup_only", action="store_true",
+ help="do only monitoring setup deployment \n"
+ "(default: False)")
+
+ # At most one of these arguments may be present
+ scan_only_group = parser.add_mutually_exclusive_group()
+ scan_only_group.add_argument("--inventory_only", action="store_true",
+ help="do only scan to inventory\n" +
+ "(default: False)")
+ scan_only_group.add_argument("--links_only", action="store_true",
+ help="do only links creation \n" +
+ "(default: False)")
+ scan_only_group.add_argument("--cliques_only", action="store_true",
+ help="do only cliques creation \n" +
+ "(default: False)")
+
+ return parser.parse_args()
+
+ def get_scan_plan(self, args):
+ # PyCharm type checker can't reliably check types of document
+ # noinspection PyTypeChecker
+ return self.prepare_scan_plan(ScanPlan(args))
+
+ def prepare_scan_plan(self, plan):
+ # Find out object type if not specified in arguments
+ if not plan.object_type:
+ if not plan.object_id:
+ plan.object_type = "environment"
+ else:
+ # If we scan a specific object, it has to exist in db
+ scanned_object = self.inv.get_by_id(plan.env, plan.object_id)
+ if not scanned_object:
+ exc_msg = "No object found with specified id: '{}'" \
+ .format(plan.object_id)
+ raise ScanArgumentsError(exc_msg)
+ plan.object_type = scanned_object["type"]
+ plan.parent_id = scanned_object["parent_id"]
+ plan.type_to_scan = scanned_object["parent_type"]
+
+ class_module = plan.object_type
+ if not plan.scan_self:
+ plan.scan_self = plan.object_type != "environment"
+
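+        # normalize the type to CamelCase for the scanner class name,
+        # e.g. "my_type" -> "MyType" (used below as "ScanMyType")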
+ plan.object_type = plan.object_type.title().replace("_", "")
+
+ if not plan.scan_self:
+ plan.child_type = None
+ else:
+ plan.child_id = plan.object_id
+ plan.object_id = plan.parent_id
+ if plan.type_to_scan.endswith("_folder"):
+ class_module = plan.child_type + "s_root"
+ else:
+ class_module = plan.type_to_scan
+ plan.object_type = class_module.title().replace("_", "")
+
+ if class_module == "environment":
+ plan.obj = {"id": plan.env}
+ else:
+ # fetch object from inventory
+ obj = self.inv.get_by_id(plan.env, plan.object_id)
+ if not obj:
+ raise ValueError("No match for object ID: {}"
+ .format(plan.object_id))
+ plan.obj = obj
+
+ plan.scanner_type = "Scan" + plan.object_type
+ return plan
+
+ def run(self, args: dict = None):
+ args = setup_args(args, self.DEFAULTS, self.get_args)
+ # After this setup we assume args dictionary has all keys
+ # defined in self.DEFAULTS
+
+ try:
+ MongoAccess.set_config_file(args['mongo_config'])
+ self.inv = InventoryMgr()
+ self.inv.set_collections(args['inventory'])
+ self.conf = Configuration()
+ except FileNotFoundError as e:
+ return False, 'Mongo configuration file not found: {}'\
+ .format(str(e))
+
+ scan_plan = self.get_scan_plan(args)
+ if scan_plan.clear or scan_plan.clear_all:
+ self.inv.clear(scan_plan)
+ self.conf.log.set_loglevel(scan_plan.loglevel)
+
+ env_name = scan_plan.env
+ self.conf.use_env(env_name)
+
+        # create a Scanner instance and point it at the environment
+ scanner = Scanner()
+ scanner.set_env(env_name)
+
+ # decide what scanning operations to do
+ inventory_only = scan_plan.inventory_only
+ links_only = scan_plan.links_only
+ cliques_only = scan_plan.cliques_only
+ monitoring_setup_only = scan_plan.monitoring_setup_only
+        run_all = not (inventory_only or links_only or cliques_only
+                       or monitoring_setup_only)
+
+ # setup monitoring server
+ monitoring = \
+ self.inv.is_feature_supported(env_name,
+ EnvironmentFeatures.MONITORING)
+ if monitoring:
+ self.inv.monitoring_setup_manager = \
+ MonitoringSetupManager(env_name)
+ self.inv.monitoring_setup_manager.server_setup()
+
+ # do the actual scanning
+ try:
+ if inventory_only or run_all:
+ scanner.run_scan(
+ scan_plan.scanner_type,
+ scan_plan.obj,
+ scan_plan.id_field,
+ scan_plan.child_id,
+ scan_plan.child_type)
+ if links_only or run_all:
+ scanner.scan_links()
+ if cliques_only or run_all:
+ scanner.scan_cliques()
+ if monitoring:
+ if monitoring_setup_only:
+ self.inv.monitoring_setup_manager.simulate_track_changes()
+ if not (inventory_only or links_only or cliques_only):
+ scanner.deploy_monitoring_setup()
+ except ScanError as e:
+ return False, "scan error: " + str(e)
+ SshConnection.disconnect_all()
+ return True, 'ok'
+
+
+if __name__ == '__main__':
+ scan_manager = ScanController()
+ ret, msg = scan_manager.run()
+ if not ret:
+ scan_manager.log.error(msg)
+ sys.exit(0 if ret else 1)
diff --git a/app/discover/scan_error.py b/app/discover/scan_error.py
new file mode 100644
index 0000000..2e04275
--- /dev/null
+++ b/app/discover/scan_error.py
@@ -0,0 +1,11 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+class ScanError(Exception):
+ pass
diff --git a/app/discover/scan_manager.py b/app/discover/scan_manager.py
new file mode 100644
index 0000000..b6ad782
--- /dev/null
+++ b/app/discover/scan_manager.py
@@ -0,0 +1,294 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import argparse
+import datetime
+
+import time
+
+import pymongo
+from functools import partial
+
+from discover.manager import Manager
+from utils.constants import ScanStatus, EnvironmentFeatures
+from utils.exceptions import ScanArgumentsError
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.file_logger import FileLogger
+from utils.mongo_access import MongoAccess
+from discover.scan import ScanController
+
+
+class ScanManager(Manager):
+
+ DEFAULTS = {
+ "mongo_config": "",
+ "scans": "scans",
+ "scheduled_scans": "scheduled_scans",
+ "environments": "environments_config",
+ "interval": 1,
+ "loglevel": "INFO"
+ }
+
+ def __init__(self):
+ self.args = self.get_args()
+ super().__init__(log_directory=self.args.log_directory,
+ mongo_config_file=self.args.mongo_config)
+ self.db_client = None
+ self.environments_collection = None
+
+ @staticmethod
+ def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
+ default=ScanManager.DEFAULTS["mongo_config"],
+ help="Name of config file " +
+ "with MongoDB server access details")
+ parser.add_argument("-c", "--scans_collection", nargs="?", type=str,
+ default=ScanManager.DEFAULTS["scans"],
+ help="Scans collection to read from")
+ parser.add_argument("-s", "--scheduled_scans_collection", nargs="?",
+ type=str,
+ default=ScanManager.DEFAULTS["scheduled_scans"],
+                            help="Scheduled scans collection to read from")
+ parser.add_argument("-e", "--environments_collection", nargs="?",
+ type=str,
+ default=ScanManager.DEFAULTS["environments"],
+ help="Environments collection to update "
+ "after scans")
+ parser.add_argument("-i", "--interval", nargs="?", type=float,
+ default=ScanManager.DEFAULTS["interval"],
+                            help="Interval between collection polls\n"
+                                 "(must be more than {} seconds)"
+ .format(ScanManager.MIN_INTERVAL))
+ parser.add_argument("-l", "--loglevel", nargs="?", type=str,
+ default=ScanManager.DEFAULTS["loglevel"],
+ help="Logging level \n(default: '{}')"
+ .format(ScanManager.DEFAULTS["loglevel"]))
+ parser.add_argument("-d", "--log_directory", nargs="?", type=str,
+ default=FileLogger.LOG_DIRECTORY,
+ help="File logger directory \n(default: '{}')"
+ .format(FileLogger.LOG_DIRECTORY))
+ args = parser.parse_args()
+ return args
+
+ def configure(self):
+ self.db_client = MongoAccess()
+ self.inv = InventoryMgr()
+ self.inv.set_collections()
+ self.scans_collection = self.db_client.db[self.args.scans_collection]
+ self.scheduled_scans_collection = \
+ self.db_client.db[self.args.scheduled_scans_collection]
+ self.environments_collection = \
+ self.db_client.db[self.args.environments_collection]
+ self._update_document = \
+ partial(MongoAccess.update_document, self.scans_collection)
+ self.interval = max(self.MIN_INTERVAL, self.args.interval)
+ self.log.set_loglevel(self.args.loglevel)
+
+ self.log.info("Started ScanManager with following configuration:\n"
+ "Mongo config file path: {0.args.mongo_config}\n"
+ "Scans collection: {0.scans_collection.name}\n"
+ "Environments collection: "
+ "{0.environments_collection.name}\n"
+ "Polling interval: {0.interval} second(s)"
+ .format(self))
+
+ def _build_scan_args(self, scan_request: dict):
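+        # translate a scan request document from the scans collection into
+        # the argument dict expected by ScanController.run()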
+ args = {
+ 'mongo_config': self.args.mongo_config
+ }
+
+ def set_arg(name_from: str, name_to: str = None):
+ if name_to is None:
+ name_to = name_from
+ val = scan_request.get(name_from)
+ if val:
+ args[name_to] = val
+
+ set_arg("object_id", "id")
+ set_arg("log_level", "loglevel")
+ set_arg("environment", "env")
+ set_arg("scan_only_inventory", "inventory_only")
+ set_arg("scan_only_links", "links_only")
+ set_arg("scan_only_cliques", "cliques_only")
+ set_arg("inventory")
+ set_arg("clear")
+ set_arg("clear_all")
+
+ return args
+
+ def _finalize_scan(self, scan_request: dict, status: ScanStatus,
+ scanned: bool):
+ scan_request['status'] = status.value
+ self._update_document(scan_request)
+ # If no object id is present, it's a full env scan.
+ # We need to update environments collection
+ # to reflect the scan results.
+ if not scan_request.get('id'):
+ self.environments_collection\
+ .update_one(filter={'name': scan_request.get('environment')},
+ update={'$set': {'scanned': scanned}})
+
+ def _fail_scan(self, scan_request: dict):
+ self._finalize_scan(scan_request, ScanStatus.FAILED, False)
+
+ def _complete_scan(self, scan_request: dict):
+ self._finalize_scan(scan_request, ScanStatus.COMPLETED, True)
+
+ # PyCharm type checker can't reliably check types of document
+ # noinspection PyTypeChecker
+ def _clean_up(self):
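+        # nothing can actually be running when the manager starts or stops,
+        # so any scan still marked as running is stale and is failed here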
+ # Find and fail all running scans
+ running_scans = list(self
+ .scans_collection
+ .find(filter={'status': ScanStatus.RUNNING.value}))
+ self.scans_collection \
+ .update_many(filter={'_id': {'$in': [scan['_id']
+ for scan
+ in running_scans]}},
+ update={'$set': {'status': ScanStatus.FAILED.value}})
+
+ # Find all environments connected to failed full env scans
+ env_scans = [scan['environment']
+ for scan in running_scans
+ if not scan.get('object_id')
+ and scan.get('environment')]
+
+ # Set 'scanned' flag in those envs to false
+ if env_scans:
+ self.environments_collection\
+ .update_many(filter={'name': {'$in': env_scans}},
+ update={'$set': {'scanned': False}})
+
+ INTERVALS = {
+ 'YEARLY': datetime.timedelta(days=365.25),
+ 'MONTHLY': datetime.timedelta(days=365.25/12),
+ 'WEEKLY': datetime.timedelta(weeks=1),
+ 'DAILY': datetime.timedelta(days=1),
+ 'HOURLY': datetime.timedelta(hours=1)
+ }
+
+ def _submit_scan_request_for_schedule(self, scheduled_scan, interval, ts):
+ scans = self.scans_collection
+ new_scan = {
+ 'status': 'submitted',
+ 'log_level': scheduled_scan['log_level'],
+ 'clear': scheduled_scan['clear'],
+ 'scan_only_inventory': scheduled_scan['scan_only_inventory'],
+ 'scan_only_links': scheduled_scan['scan_only_links'],
+ 'scan_only_cliques': scheduled_scan['scan_only_cliques'],
+ 'submit_timestamp': ts,
+ 'environment': scheduled_scan['environment'],
+ 'inventory': 'inventory'
+ }
+ scans.insert_one(new_scan)
+
+ def _set_scheduled_requests_next_run(self, scheduled_scan, interval, ts):
+ scheduled_scan['scheduled_timestamp'] = ts + self.INTERVALS[interval]
+ doc_id = scheduled_scan.pop('_id')
+ self.scheduled_scans_collection.update({'_id': doc_id}, scheduled_scan)
+
+ def _prepare_scheduled_requests_for_interval(self, interval):
+ now = datetime.datetime.utcnow()
+
+ # first, submit a scan request where the scheduled time has come
+ condition = {'$and': [
+ {'freq': interval},
+ {'scheduled_timestamp': {'$lte': now}}
+ ]}
+ matches = self.scheduled_scans_collection.find(condition) \
+ .sort('scheduled_timestamp', pymongo.ASCENDING)
+ for match in matches:
+ self._submit_scan_request_for_schedule(match, interval, now)
+ self._set_scheduled_requests_next_run(match, interval, now)
+
+ # now set scheduled time where it was not set yet (new scheduled scans)
+ condition = {'$and': [
+ {'freq': interval},
+ {'scheduled_timestamp': {'$exists': False}}
+ ]}
+ matches = self.scheduled_scans_collection.find(condition)
+ for match in matches:
+ self._set_scheduled_requests_next_run(match, interval, now)
+
+ def _prepare_scheduled_requests(self):
+ # see if any scheduled request is waiting to be submitted
+ for interval in self.INTERVALS.keys():
+ self._prepare_scheduled_requests_for_interval(interval)
+
+ def do_action(self):
+ self._clean_up()
+ try:
+ while True:
+ self._prepare_scheduled_requests()
+
+ # Find a pending request that is waiting the longest time
+ results = self.scans_collection \
+ .find({'status': ScanStatus.PENDING.value,
+ 'submit_timestamp': {'$ne': None}}) \
+ .sort("submit_timestamp", pymongo.ASCENDING) \
+ .limit(1)
+
+ # If no scans are pending, sleep for some time
+ if results.count() == 0:
+ time.sleep(self.interval)
+ else:
+ scan_request = results[0]
+ if not self.inv.is_feature_supported(scan_request.get('environment'),
+ EnvironmentFeatures.SCANNING):
+ self.log.error("Scanning is not supported for env '{}'"
+ .format(scan_request.get('environment')))
+ self._fail_scan(scan_request)
+ continue
+
+ scan_request['start_timestamp'] = datetime.datetime.utcnow()
+ scan_request['status'] = ScanStatus.RUNNING.value
+ self._update_document(scan_request)
+
+ # Prepare scan arguments and run the scan with them
+ try:
+ scan_args = self._build_scan_args(scan_request)
+
+ self.log.info("Starting scan for '{}' environment"
+ .format(scan_args.get('env')))
+ self.log.debug("Scan arguments: {}".format(scan_args))
+ result, message = ScanController().run(scan_args)
+ except ScanArgumentsError as e:
+ self.log.error("Scan request '{id}' "
+ "has invalid arguments. "
+ "Errors:\n{errors}"
+ .format(id=scan_request['_id'],
+ errors=e))
+ self._fail_scan(scan_request)
+ except Exception as e:
+ self.log.exception(e)
+ self.log.error("Scan request '{}' has failed."
+ .format(scan_request['_id']))
+ self._fail_scan(scan_request)
+ else:
+                        # Check if the scan returned success
+ if not result:
+ self.log.error(message)
+ self.log.error("Scan request '{}' has failed."
+ .format(scan_request['_id']))
+ self._fail_scan(scan_request)
+ continue
+
+ # update the status and timestamps.
+ self.log.info("Request '{}' has been scanned."
+ .format(scan_request['_id']))
+ end_time = datetime.datetime.utcnow()
+ scan_request['end_timestamp'] = end_time
+ self._complete_scan(scan_request)
+ finally:
+ self._clean_up()
+
+
+if __name__ == "__main__":
+ ScanManager().run()
diff --git a/app/discover/scan_metadata_parser.py b/app/discover/scan_metadata_parser.py
new file mode 100644
index 0000000..df27e18
--- /dev/null
+++ b/app/discover/scan_metadata_parser.py
@@ -0,0 +1,202 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.folder_fetcher import FolderFetcher
+from utils.metadata_parser import MetadataParser
+from utils.mongo_access import MongoAccess
+from utils.util import ClassResolver
+
+
+class ScanMetadataParser(MetadataParser):
+
+ SCANNERS_PACKAGE = 'scanners_package'
+ SCANNERS_FILE = 'scanners.json'
+ SCANNERS = 'scanners'
+
+ TYPE = 'type'
+ FETCHER = 'fetcher'
+ CHILDREN_SCANNER = 'children_scanner'
+ ENVIRONMENT_CONDITION = 'environment_condition'
+ OBJECT_ID_TO_USE_IN_CHILD = 'object_id_to_use_in_child'
+
+ COMMENT = '_comment'
+
+ REQUIRED_SCANNER_ATTRIBUTES = [TYPE, FETCHER]
+ ALLOWED_SCANNER_ATTRIBUTES = [TYPE, FETCHER, CHILDREN_SCANNER,
+ ENVIRONMENT_CONDITION,
+ OBJECT_ID_TO_USE_IN_CHILD]
+
+ MECHANISM_DRIVER = 'mechanism_driver'
+
+ def __init__(self, inventory_mgr):
+ super().__init__()
+ self.inv = inventory_mgr
+ self.constants = {}
+
+ def get_required_fields(self):
+ return [self.SCANNERS_PACKAGE, self.SCANNERS]
+
+ def validate_fetcher(self, scanner_name: str, scan_type: dict,
+ type_index: int, package: str):
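+        # a fetcher is either a class name (resolved and instantiated here)
+        # or a dict with "folder": true, which maps to a FolderFetcher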
+ fetcher = scan_type.get(self.FETCHER, '')
+ if not fetcher:
+ self.add_error('missing or empty fetcher in scanner {} type #{}'
+ .format(scanner_name, str(type_index)))
+ elif isinstance(fetcher, str):
+ try:
+ module_name = ClassResolver.get_module_file_by_class_name(fetcher)
+ fetcher_package = module_name.split("_")[0]
+ if package:
+ fetcher_package = ".".join((package, fetcher_package))
+ instance = ClassResolver.get_instance_of_class(package_name=fetcher_package,
+ module_name=module_name,
+ class_name=fetcher)
+ except ValueError:
+ instance = None
+ if not instance:
+ self.add_error('failed to find fetcher class {} in scanner {}'
+ ' type #{}'
+ .format(fetcher, scanner_name, type_index))
+ scan_type[self.FETCHER] = instance
+ elif isinstance(fetcher, dict):
+ is_folder = fetcher.get('folder', False)
+ if not is_folder:
+ self.add_error('scanner {} type #{}: '
+ 'only folder dict accepted in fetcher'
+ .format(scanner_name, type_index))
+ else:
+ instance = FolderFetcher(fetcher['types_name'],
+ fetcher['parent_type'],
+ fetcher.get('text', ''))
+ scan_type[self.FETCHER] = instance
+ else:
+ self.add_error('incorrect type of fetcher for scanner {} type #{}'
+ .format(scanner_name, type_index))
+
+ def validate_children_scanner(self, scanner_name: str, type_index: int,
+ scanners: dict, scan_type: dict):
+ scanner = scanners[scanner_name]
+ if 'children_scanner' in scan_type:
+ children_scanner = scan_type.get('children_scanner')
+ if not isinstance(children_scanner, str):
+ self.add_error('scanner {} type #{}: '
+ 'children_scanner must be a string'
+ .format(scanner_name, type_index))
+ elif children_scanner not in scanners:
+ self.add_error('scanner {} type #{}: '
+ 'children_scanner {} not found '
+ .format(scanner_name, type_index,
+ children_scanner))
+
+ def validate_environment_condition(self, scanner_name: str, type_index: int,
+ scanner: dict):
+ if self.ENVIRONMENT_CONDITION not in scanner:
+ return
+ condition = scanner[self.ENVIRONMENT_CONDITION]
+ if not isinstance(condition, dict):
+ self.add_error('scanner {} type #{}: condition must be dict'
+ .format(scanner_name, str(type_index)))
+ return
+ if self.MECHANISM_DRIVER in condition.keys():
+ drivers = condition[self.MECHANISM_DRIVER]
+ if not isinstance(drivers, list):
+ self.add_error('scanner {} type #{}: '
+ '{} must be a list of strings'
+ .format(scanner_name, type_index,
+ self.MECHANISM_DRIVER))
+ if not all((isinstance(driver, str) for driver in drivers)):
+ self.add_error('scanner {} type #{}: '
+ '{} must be a list of strings'
+ .format(scanner_name, type_index,
+ self.MECHANISM_DRIVER))
+ else:
+ for driver in drivers:
+ self.validate_constant(scanner_name,
+ driver,
+ 'mechanism_drivers',
+ 'mechanism drivers')
+
+ def validate_scanner(self, scanners: dict, name: str, package: str):
+ scanner = scanners.get(name)
+ if not scanner:
+            self.add_error('failed to find scanner: {}'.format(name))
+ return
+
+ # make sure only allowed attributes are supplied
+        for i in range(len(scanner)):
+ scan_type = scanner[i]
+ self.validate_scan_type(scanners, name, i+1, scan_type, package)
+
+ def validate_scan_type(self, scanners: dict, scanner_name: str,
+ type_index: int, scan_type: dict, package: str):
+ # keep previous error count to know if errors were detected here
+ error_count = len(self.errors)
+ # ignore comments
+ scan_type.pop(self.COMMENT, '')
+ for attribute in scan_type.keys():
+ if attribute not in self.ALLOWED_SCANNER_ATTRIBUTES:
+ self.add_error('unknown attribute {} '
+ 'in scanner {}, type #{}'
+ .format(attribute, scanner_name,
+ str(type_index)))
+
+ # make sure required attributes are supplied
+ for attribute in ScanMetadataParser.REQUIRED_SCANNER_ATTRIBUTES:
+ if attribute not in scan_type:
+ self.add_error('scanner {}, type #{}: '
+ 'missing attribute "{}"'
+ .format(scanner_name, str(type_index),
+ attribute))
+ # the following checks depend on previous checks,
+ # so return if previous checks found errors
+ if len(self.errors) > error_count:
+ return
+
+ # type must be valid object type
+ self.validate_constant(scanner_name, scan_type[self.TYPE],
+ 'scan_object_types', 'types')
+ self.validate_fetcher(scanner_name, scan_type, type_index, package)
+ self.validate_children_scanner(scanner_name, type_index, scanners,
+ scan_type)
+ self.validate_environment_condition(scanner_name, type_index,
+ scan_type)
+
+ def get_constants(self, scanner_name, items_desc, constant_type):
+ if not self.constants.get(constant_type):
+ constants = MongoAccess.db['constants']
+ values_list = constants.find_one({'name': constant_type})
+ if not values_list:
+ raise ValueError('scanner {}: '
+ 'could not find {} list in DB'
+ .format(scanner_name, items_desc))
+ self.constants[constant_type] = values_list
+ return self.constants[constant_type]
+
+ def validate_constant(self,
+ scanner_name: str,
+ value_to_check: str,
+ constant_type: str,
+ items_desc: str = None):
+ values_list = self.get_constants(scanner_name, items_desc,
+ constant_type)
+ values = [t['value'] for t in values_list['data']]
+ if value_to_check not in values:
+ self.add_error('scanner {}: value not in {}: {}'
+ .format(scanner_name, items_desc, value_to_check))
+
+ def validate_metadata(self, metadata: dict) -> bool:
+ super().validate_metadata(metadata)
+ scanners = metadata.get(self.SCANNERS, {})
+ package = metadata.get(self.SCANNERS_PACKAGE)
+ if not scanners:
+ self.add_error('no scanners found in scanners list')
+ else:
+ for name in scanners.keys():
+ self.validate_scanner(scanners, name, package)
+ return len(self.errors) == 0
diff --git a/app/discover/scanner.py b/app/discover/scanner.py
new file mode 100644
index 0000000..1b7cd51
--- /dev/null
+++ b/app/discover/scanner.py
@@ -0,0 +1,253 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# base class for scanners
+
+import json
+import queue
+import os
+import traceback
+
+from discover.clique_finder import CliqueFinder
+from discover.configuration import Configuration
+from discover.fetcher import Fetcher
+from discover.find_links_for_instance_vnics import FindLinksForInstanceVnics
+from discover.find_links_for_oteps import FindLinksForOteps
+from discover.find_links_for_pnics import FindLinksForPnics
+from discover.find_links_for_vconnectors import FindLinksForVconnectors
+from discover.find_links_for_vedges import FindLinksForVedges
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scan_error import ScanError
+from discover.scan_metadata_parser import ScanMetadataParser
+from utils.constants import EnvironmentFeatures
+from utils.inventory_mgr import InventoryMgr
+from utils.util import ClassResolver
+
+
+class Scanner(Fetcher):
+ config = None
+ environment = None
+ env = None
+ root_patern = None
+ scan_queue = queue.Queue()
+ scan_queue_track = {}
+
+ def __init__(self):
+ """
+ Scanner is the base class for scanners.
+ """
+ super().__init__()
+ self.config = Configuration()
+ self.inv = InventoryMgr()
+ self.scanners_package = None
+ self.scanners = {}
+ self.load_metadata()
+
+ def scan(self, scanner_type, obj, id_field="id",
+ limit_to_child_id=None, limit_to_child_type=None):
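+        # run all fetch types defined for this scanner; optionally restrict
+        # the scan to a single child object by ID and/or type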
+ types_to_fetch = self.get_scanner(scanner_type)
+ types_children = []
+ if not limit_to_child_type:
+ limit_to_child_type = []
+ elif isinstance(limit_to_child_type, str):
+ limit_to_child_type = [limit_to_child_type]
+ try:
+ for t in types_to_fetch:
+ if limit_to_child_type and t["type"] not in limit_to_child_type:
+ continue
+ children = self.scan_type(t, obj, id_field)
+ if limit_to_child_id:
+ children = [c for c in children
+ if c[id_field] == limit_to_child_id]
+ if not children:
+ continue
+ types_children.append({"type": t["type"],
+ "children": children})
+ except ValueError:
+ return False
+ if limit_to_child_id and len(types_children) > 0:
+ t = types_children[0]
+ children = t["children"]
+ return children[0]
+ return obj
+
+ def check_type_env(self, type_to_fetch):
+ # check if type is to be run in this environment
+ if "environment_condition" not in type_to_fetch:
+ return True
+ env_cond = type_to_fetch.get("environment_condition", {})
+ if not env_cond:
+ return True
+ if not isinstance(env_cond, dict):
+ self.log.warn('illegal environment_condition given '
+ 'for type {}'.format(type_to_fetch['type']))
+ return True
+ conf = self.config.get_env_config()
+ for attr, required_val in env_cond.items():
+ if attr == "mechanism_drivers":
+ if "mechanism_drivers" not in conf:
+ self.log.warn('illegal environment configuration: '
+ 'missing mechanism_drivers')
+ return False
+ if not isinstance(required_val, list):
+ required_val = [required_val]
+ return bool(set(required_val) & set(conf["mechanism_drivers"]))
+ elif attr not in conf or conf[attr] != required_val:
+ return False
+ # no check failed
+ return True
+
+ def scan_type(self, type_to_fetch, parent, id_field):
+ # check if type is to be run in this environment
+ if not self.check_type_env(type_to_fetch):
+ return []
+
+ if not parent:
+ obj_id = None
+ else:
+ obj_id = str(parent[id_field])
+ if not obj_id or not obj_id.rstrip():
+ raise ValueError("Object missing " + id_field + " attribute")
+
+ # get Fetcher instance
+ fetcher = type_to_fetch["fetcher"]
+ fetcher.set_env(self.get_env())
+
+ # get children_scanner instance
+ children_scanner = type_to_fetch.get("children_scanner")
+
+ escaped_id = fetcher.escape(str(obj_id)) if obj_id else obj_id
+ self.log.info(
+ "scanning : type=%s, parent: (type=%s, name=%s, id=%s)",
+ type_to_fetch["type"],
+ parent.get('type', 'environment'),
+ parent.get('name', ''),
+ escaped_id)
+
+ # fetch OpenStack data from environment by CLI, API or MySQL
+ # or physical devices data from ACI API
+ # It depends on the Fetcher's config.
+ try:
+ db_results = fetcher.get(escaped_id)
+ except Exception as e:
+ self.log.error("Error while scanning : " +
+ "fetcher=%s, " +
+ "type=%s, " +
+ "parent: (type=%s, name=%s, id=%s), " +
+ "error: %s",
+ fetcher.__class__.__name__,
+ type_to_fetch["type"],
+ "environment" if "type" not in parent
+ else parent["type"],
+ "" if "name" not in parent else parent["name"],
+ escaped_id,
+ e)
+ traceback.print_exc()
+ raise ScanError(str(e))
+
+ # format results
+ if isinstance(db_results, dict):
+ results = db_results["rows"] if db_results["rows"] else [db_results]
+ elif isinstance(db_results, str):
+ results = json.loads(db_results)
+ else:
+ results = db_results
+
+ # get child_id_field
+ try:
+ child_id_field = type_to_fetch["object_id_to_use_in_child"]
+ except KeyError:
+ child_id_field = "id"
+
+ environment = self.get_env()
+ children = []
+
+ for o in results:
+ saved = self.inv.save_inventory_object(o,
+ parent=parent,
+ environment=environment,
+ type_to_fetch=type_to_fetch)
+
+ if saved:
+ # add objects into children list.
+ children.append(o)
+
+ # put children scanner into queue
+ if children_scanner:
+ self.queue_for_scan(o, child_id_field, children_scanner)
+ return children
+
+ # scanning queued items, rather than going depth-first (DFS)
+ # this is done to allow collecting all required data for objects
+ # before continuing to next level
+ # for example, get host ID from API os-hypervisors call, so later
+ # we can use this ID in the "os-hypervisors/<ID>/servers" call
+ @staticmethod
+ def queue_for_scan(o, child_id_field, children_scanner):
+        if o["type"] + ";" + o["id"] in Scanner.scan_queue_track:
+ return
+ Scanner.scan_queue_track[o["type"] + ";" + o["id"]] = 1
+ Scanner.scan_queue.put({"object": o,
+ "child_id_field": child_id_field,
+ "scanner": children_scanner})
+
+ def run_scan(self, scanner_type, obj, id_field, child_id, child_type):
+ results = self.scan(scanner_type, obj, id_field, child_id, child_type)
+
+ # run children scanner from queue.
+ self.scan_from_queue()
+ return results
+
+ def scan_from_queue(self):
+ while not Scanner.scan_queue.empty():
+ item = Scanner.scan_queue.get()
+ scanner_type = item["scanner"]
+
+ # scan the queued item
+ self.scan(scanner_type, item["object"], item["child_id_field"])
+ self.log.info("Scan complete")
+
+ def scan_links(self):
+ self.log.info("scanning for links")
+ fetchers_implementing_add_links = [
+ FindLinksForPnics(),
+ FindLinksForInstanceVnics(),
+ FindLinksForVserviceVnics(),
+ FindLinksForVconnectors(),
+ FindLinksForVedges(),
+ FindLinksForOteps()
+ ]
+ for fetcher in fetchers_implementing_add_links:
+ fetcher.set_env(self.get_env())
+ fetcher.add_links()
+
+ def scan_cliques(self):
+ clique_scanner = CliqueFinder()
+ clique_scanner.set_env(self.get_env())
+ clique_scanner.find_cliques()
+
+ def deploy_monitoring_setup(self):
+ self.inv.monitoring_setup_manager.handle_pending_setup_changes()
+
+ def load_metadata(self):
+ parser = ScanMetadataParser(self.inv)
+ conf = self.config.get_env_config()
+ scanners_file = os.path.join(conf.get('app_path', '/etc/calipso'),
+ 'config',
+ ScanMetadataParser.SCANNERS_FILE)
+
+ metadata = parser.parse_metadata_file(scanners_file)
+ self.scanners_package = metadata[ScanMetadataParser.SCANNERS_PACKAGE]
+ self.scanners = metadata[ScanMetadataParser.SCANNERS]
+
+ def get_scanner_package(self):
+ return self.scanners_package
+
+ def get_scanner(self, scanner_type: str) -> dict:
+ return self.scanners.get(scanner_type)
diff --git a/app/install/calipso-installer.py b/app/install/calipso-installer.py
new file mode 100644
index 0000000..bccddae
--- /dev/null
+++ b/app/install/calipso-installer.py
@@ -0,0 +1,380 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from pymongo import MongoClient, ReturnDocument
+from pymongo.errors import ConnectionFailure
+from urllib.parse import quote_plus
+import docker
+import argparse
+import dockerpycreds
+# note: dockerpycreds is not used directly; it can be useful if Docker API
+# security is enabled
+import time
+import json
+
+
+class MongoComm:
+    # handles communication from the host/installer server to MongoDB;
+    # includes some methods intended for future use
+ try:
+
+ def __init__(self, host, user, password, port):
+ self.uri = "mongodb://%s:%s@%s:%s/%s" % (
+ quote_plus(user), quote_plus(password), host, port, "calipso")
+ self.client = MongoClient(self.uri)
+
+ def find(self, coll, key, val):
+ collection = self.client.calipso[coll]
+ doc = collection.find({key: val})
+ return doc
+
+ def get(self, coll, doc_name):
+ collection = self.client.calipso[coll]
+ doc = collection.find_one({"name": doc_name})
+ return doc
+
+ def insert(self, coll, doc):
+ collection = self.client.calipso[coll]
+ doc_id = collection.insert(doc)
+ return doc_id
+
+ def remove_doc(self, coll, doc):
+ collection = self.client.calipso[coll]
+ collection.remove(doc)
+
+ def remove_coll(self, coll):
+ collection = self.client.calipso[coll]
+ collection.remove()
+
+ def find_update(self, coll, key, val, data):
+ collection = self.client.calipso[coll]
+ collection.find_one_and_update(
+ {key: val},
+ {"$set": data},
+ upsert=True
+ )
+
+ def update(self, coll, doc, upsert=False):
+ collection = self.client.calipso[coll]
+            result = collection.update_one({'_id': doc['_id']},
+                                           {'$set': doc}, upsert=upsert)
+            return result
+
+ except ConnectionFailure:
+ print("MongoDB Server not available")
+
+
+DockerClient = docker.from_env() # using local host docker environment parameters
+
+# use the below example for installer against a remote docker host:
+# DockerClient = docker.DockerClient(base_url='tcp://korlev-calipso-testing.cisco.com:2375')
+
+
+def copy_file(filename):
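+    # load db/<filename>.json and replace the matching MongoDB collection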
+ c = MongoComm(args.hostname, args.dbuser, args.dbpassword, args.dbport)
+    with open('db/' + filename + '.json') as txt:
+        data = json.load(txt)
+ c.remove_coll(filename)
+ doc_id = c.insert(filename, data)
+ print("Copied", filename, "mongo doc_ids:\n\n", doc_id, "\n\n")
+ time.sleep(1)
+
+
+C_MONGO_CONFIG = "/local_dir/calipso_mongo_access.conf"
+H_MONGO_CONFIG = "/home/calipso/calipso_mongo_access.conf"
+PYTHONPATH = "/home/scan/calipso_prod/app"
+C_LDAP_CONFIG = "/local_dir/ldap.conf"
+H_LDAP_CONFIG = "/home/calipso/ldap.conf"
+
+# functions to check and start calipso containers:
+def start_mongo(dbport):
+ if not DockerClient.containers.list(all=True, filters={"name": "calipso-mongo"}):
+ print("\nstarting container calipso-mongo, please wait...\n")
+ image = DockerClient.images.list(all=True, name="korenlev/calipso:mongo")
+ if image:
+ print(image, "exists...not downloading...")
+ else:
+ print("image korenlev/calipso:mongo missing, hold on while downloading first...\n")
+ image = DockerClient.images.pull("korenlev/calipso:mongo")
+ print("Downloaded", image, "\n\n")
+ mongocontainer = DockerClient.containers.run('korenlev/calipso:mongo', detach=True, name="calipso-mongo",
+ ports={'27017/tcp': dbport, '28017/tcp': 28017},
+ restart_policy={"Name": "always"})
+ # wait a bit till mongoDB is up before starting to copy the json files from 'db' folder:
+ time.sleep(5)
+ enable_copy = input("create initial calipso DB ? (copy json files from 'db' folder to mongoDB -"
+ " 'c' to copy, 'q' to skip):")
+ if enable_copy == "c":
+ print("\nstarting to copy json files to mongoDB...\n\n")
+ print("-----------------------------------------\n\n")
+ time.sleep(1)
+ copy_file("attributes_for_hover_on_data")
+ copy_file("clique_constraints")
+ copy_file("clique_types")
+ copy_file("cliques")
+ copy_file("constants")
+ copy_file("environments_config")
+ copy_file("inventory")
+ copy_file("link_types")
+ copy_file("links")
+ copy_file("messages")
+ copy_file("meteor_accounts_loginServiceConfiguration")
+ copy_file("users")
+ copy_file("monitoring_config")
+ copy_file("monitoring_config_templates")
+ copy_file("network_agent_types")
+ copy_file("roles")
+ copy_file("scans")
+ copy_file("scheduled_scans")
+ copy_file("statistics")
+ copy_file("supported_environments")
+
+ # note: 'messages', 'roles', 'users' and some of the 'constants' are filled by calipso-ui at runtime
+ # some other docs are filled later by scanning, logging and monitoring
+ else:
+ return
+ else:
+ print("container named calipso-mongo already exists, please deal with it using docker...\n")
+ return
+
+
+def start_listen():
+ if not DockerClient.containers.list(all=True, filters={"name": "calipso-listen"}):
+ print("\nstarting container calipso-listen...\n")
+ image = DockerClient.images.list(all=True, name="korenlev/calipso:listen")
+ if image:
+ print(image, "exists...not downloading...")
+ else:
+ print("image korenlev/calipso:listen missing, hold on while downloading first...\n")
+ image = DockerClient.images.pull("korenlev/calipso:listen")
+ print("Downloaded", image, "\n\n")
+ listencontainer = DockerClient.containers.run('korenlev/calipso:listen', detach=True, name="calipso-listen",
+ ports={'22/tcp': 50022},
+ restart_policy={"Name": "always"},
+ environment=["PYTHONPATH=" + PYTHONPATH,
+ "MONGO_CONFIG=" + C_MONGO_CONFIG],
+ volumes={'/home/calipso': {'bind': '/local_dir', 'mode': 'rw'}})
+ else:
+ print("container named calipso-listen already exists, please deal with it using docker...\n")
+ return
+
+
+def start_ldap():
+ if not DockerClient.containers.list(all=True, filters={"name": "calipso-ldap"}):
+ print("\nstarting container calipso-ldap...\n")
+ image = DockerClient.images.list(all=True, name="korenlev/calipso:ldap")
+ if image:
+ print(image, "exists...not downloading...")
+ else:
+ print("image korenlev/calipso:ldap missing, hold on while downloading first...\n")
+ image = DockerClient.images.pull("korenlev/calipso:ldap")
+ print("Downloaded", image, "\n\n")
+ ldapcontainer = DockerClient.containers.run('korenlev/calipso:ldap', detach=True, name="calipso-ldap",
+ ports={'389/tcp': 389, '389/udp': 389},
+ restart_policy={"Name": "always"},
+ volumes={'/home/calipso/': {'bind': '/local_dir/', 'mode': 'rw'}})
+ else:
+ print("container named calipso-ldap already exists, please deal with it using docker...\n")
+ return
+
+
+def start_api():
+ if not DockerClient.containers.list(all=True, filters={"name": "calipso-api"}):
+ print("\nstarting container calipso-api...\n")
+ image = DockerClient.images.list(all=True, name="korenlev/calipso:api")
+ if image:
+ print(image, "exists...not downloading...")
+ else:
+ print("image korenlev/calipso:api missing, hold on while downloading first...\n")
+ image = DockerClient.images.pull("korenlev/calipso:api")
+ print("Downloaded", image, "\n\n")
+ apicontainer = DockerClient.containers.run('korenlev/calipso:api', detach=True, name="calipso-api",
+ ports={'8000/tcp': 8000, '22/tcp': 40022},
+ restart_policy={"Name": "always"},
+ environment=["PYTHONPATH=" + PYTHONPATH,
+ "MONGO_CONFIG=" + C_MONGO_CONFIG,
+ "LDAP_CONFIG=" + C_LDAP_CONFIG,
+ "LOG_LEVEL=DEBUG"],
+ volumes={'/home/calipso/': {'bind': '/local_dir/', 'mode': 'rw'}})
+ else:
+ print("container named calipso-api already exists, please deal with it using docker...\n")
+ return
+
+
+def start_scan():
+ if not DockerClient.containers.list(all=True, filters={"name": "calipso-scan"}):
+ print("\nstarting container calipso-scan...\n")
+ image = DockerClient.images.list(all=True, name="korenlev/calipso:scan")
+ if image:
+ print(image, "exists...not downloading...")
+ else:
+ print("image korenlev/calipso:scan missing, hold on while downloading first...\n")
+ image = DockerClient.images.pull("korenlev/calipso:scan")
+ print("Downloaded", image, "\n\n")
+ scancontainer = DockerClient.containers.run('korenlev/calipso:scan', detach=True, name="calipso-scan",
+ ports={'22/tcp': 30022},
+ restart_policy={"Name": "always"},
+ environment=["PYTHONPATH=" + PYTHONPATH,
+ "MONGO_CONFIG=" + C_MONGO_CONFIG],
+ volumes={'/home/calipso/': {'bind': '/local_dir/', 'mode': 'rw'}})
+ else:
+ print("container named calipso-scan already exists, please deal with it using docker...\n")
+ return
+
+
+def start_sensu():
+ if not DockerClient.containers.list(all=True, filters={"name": "calipso-sensu"}):
+ print("\nstarting container calipso-sensu...\n")
+ image = DockerClient.images.list(all=True, name="korenlev/calipso:sensu")
+ if image:
+ print(image, "exists...not downloading...")
+ else:
+ print("image korenlev/calipso:sensu missing, hold on while downloading first...\n")
+ image = DockerClient.images.pull("korenlev/calipso:sensu")
+ print("Downloaded", image, "\n\n")
+ sensucontainer = DockerClient.containers.run('korenlev/calipso:sensu', detach=True, name="calipso-sensu",
+ ports={'22/tcp': 20022, '3000/tcp': 3000, '4567/tcp': 4567,
+ '5671/tcp': 5671, '15672/tcp': 15672},
+ restart_policy={"Name": "always"},
+ environment=["PYTHONPATH=" + PYTHONPATH],
+ volumes={'/home/calipso/': {'bind': '/local_dir/', 'mode': 'rw'}})
+ else:
+ print("container named calipso-sensu already exists, please deal with it using docker...\n")
+ return
+
+
+def start_ui(host, dbuser, dbpassword, webport, dbport):
+ if not DockerClient.containers.list(all=True, filters={"name": "calipso-ui"}):
+ print("\nstarting container calipso-ui...\n")
+ image = DockerClient.images.list(all=True, name="korenlev/calipso:ui")
+ if image:
+ print(image, "exists...not downloading...")
+ else:
+ print("image korenlev/calipso:ui missing, hold on while downloading first...\n")
+ image = DockerClient.images.pull("korenlev/calipso:ui")
+ print("Downloaded", image, "\n\n")
+ uicontainer = DockerClient.containers.run('korenlev/calipso:ui', detach=True, name="calipso-ui",
+ ports={'3000/tcp': webport},
+ restart_policy={"Name": "always"},
+ environment=["ROOT_URL=http://{}:{}".format(host, str(webport)),
+ "MONGO_URL=mongodb://{}:{}@{}:{}/calipso".format
+ (dbuser, dbpassword, host, str(dbport)),
+ "LDAP_CONFIG=" + C_LDAP_CONFIG])
+ else:
+ print("container named calipso-ui already exists, please deal with it using docker...\n")
+ return
+
+
+# function to check and stop a given calipso container:
+
+def container_stop(container_name):
+ if DockerClient.containers.list(all=True, filters={"name": container_name}):
+ print("fetching container name", container_name, "...\n")
+ c = DockerClient.containers.get(container_name)
+ if c.status != "running":
+ print(container_name, "is not running...")
+ time.sleep(1)
+ print("removing container name", c.name, "...\n")
+ c.remove()
+ else:
+ print("killing container name", c.name, "...\n")
+ c.kill()
+ time.sleep(1)
+ print("removing container name", c.name, "...\n")
+ c.remove()
+ else:
+ print("no container named", container_name, "found...")
+
+
+# parser for getting optional command arguments:
+parser = argparse.ArgumentParser()
+parser.add_argument("--hostname", help="Hostname or IP address of the server (default=172.17.0.1)",type=str,
+ default="172.17.0.1", required=False)
+parser.add_argument("--webport", help="Port for the Calipso WebUI (default=80)",type=int,
+ default="80", required=False)
+parser.add_argument("--dbport", help="Port for the Calipso MongoDB (default=27017)",type=int,
+ default="27017", required=False)
+parser.add_argument("--dbuser", help="User for the Calipso MongoDB (default=calipso)",type=str,
+ default="calipso", required=False)
+parser.add_argument("--dbpassword", help="Password for the Calipso MongoDB (default=calipso_default)",type=str,
+ default="calipso_default", required=False)
+args = parser.parse_args()
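+# example invocation (illustrative - the installer script name is an assumption):
+#   python3 calipso-installer.py --hostname 10.0.0.1 --webport 8080 --dbport 27017
+# with no arguments the defaults above are used; the script then prompts for an
+# action and a container name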
+
+container = ""
+action = ""
+container_names = ["all", "calipso-mongo", "calipso-scan", "calipso-listen", "calipso-ldap", "calipso-api",
+ "calipso-sensu", "calipso-ui"]
+container_actions = ["stop", "start"]
+while action not in container_actions:
+ action = input("Action? (stop, start, or 'q' to quit):\n")
+ if action == "q":
+ exit()
+while container not in container_names:
+ container = input("Container? (all, calipso-mongo, calipso-scan, calipso-listen, calipso-ldap, calipso-api, "
+ "calipso-sensu, calipso-ui or 'q' to quit):\n")
+ if container == "q":
+ exit()
+
+# starting the containers per arguments:
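+# when 'all' is selected the containers are brought up in dependency order
+# (mongo first, then listen, ldap, api, scan, sensu and finally ui), with a
+# short pause between each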
+if action == "start":
+ # building /home/calipso/calipso_mongo_access.conf and /home/calipso/ldap.conf files, per the arguments:
+ calipso_mongo_access_text = "server " + args.hostname + "\nuser " + args.dbuser + "\npassword " + \
+ args.dbpassword + "\nauth_db calipso"
+ ldap_text = "user admin" + "\npassword password" + "\nurl ldap://" + args.hostname + ":389" + \
+ "\nuser_id_attribute CN" + "\nuser_pass_attribute userpassword" + \
+ "\nuser_objectclass inetOrgPerson" + \
+ "\nuser_tree_dn OU=Users,DC=openstack,DC=org" + "\nquery_scope one" + \
+ "\ntls_req_cert allow" + \
+ "\ngroup_member_attribute member"
+ print("creating default", H_MONGO_CONFIG, "file...\n")
+ calipso_mongo_access_file = open(H_MONGO_CONFIG, "w+")
+ time.sleep(1)
+ calipso_mongo_access_file.write(calipso_mongo_access_text)
+ calipso_mongo_access_file.close()
+ print("creating default", H_LDAP_CONFIG, "file...\n")
+ ldap_file = open(H_LDAP_CONFIG, "w+")
+ time.sleep(1)
+ ldap_file.write(ldap_text)
+ ldap_file.close()
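+ # the two generated files follow the same layout as the calipso_mongo_access.conf.example
+ # and ldap.conf.example files added under app/install/ in this patch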
+
+ if container == "calipso-mongo" or container == "all":
+ start_mongo(args.dbport)
+ time.sleep(1)
+ if container == "calipso-listen" or container == "all":
+ start_listen()
+ time.sleep(1)
+ if container == "calipso-ldap" or container == "all":
+ start_ldap()
+ time.sleep(1)
+ if container == "calipso-api" or container == "all":
+ start_api()
+ time.sleep(1)
+ if container == "calipso-scan" or container == "all":
+ start_scan()
+ time.sleep(1)
+ if container == "calipso-sensu" or container == "all":
+ start_sensu()
+ time.sleep(1)
+ if container == "calipso-ui" or container == "all":
+ start_ui(args.hostname, args.dbuser, args.dbpassword, args.webport, args.dbport)
+ time.sleep(1)
+
+# stopping the containers per arguments:
+if action == "stop":
+ if container == "calipso-mongo" or container == "all":
+ container_stop("calipso-mongo")
+ if container == "calipso-listen" or container == "all":
+ container_stop("calipso-listen")
+ if container == "calipso-ldap" or container == "all":
+ container_stop("calipso-ldap")
+ if container == "calipso-api" or container == "all":
+ container_stop("calipso-api")
+ if container == "calipso-scan" or container == "all":
+ container_stop("calipso-scan")
+ if container == "calipso-sensu" or container == "all":
+ container_stop("calipso-sensu")
+ if container == "calipso-ui" or container == "all":
+ container_stop("calipso-ui")
diff --git a/app/install/calipso_mongo_access.conf.example b/app/install/calipso_mongo_access.conf.example
new file mode 100644
index 0000000..1b3377d
--- /dev/null
+++ b/app/install/calipso_mongo_access.conf.example
@@ -0,0 +1,4 @@
+server korlev-calipso-dev.cisco.com
+user calipso
+password calipso_default
+auth_db calipso
diff --git a/app/install/db/attributes_for_hover_on_data.json b/app/install/db/attributes_for_hover_on_data.json
new file mode 100644
index 0000000..6fdc5a3
--- /dev/null
+++ b/app/install/db/attributes_for_hover_on_data.json
@@ -0,0 +1,89 @@
+[
+{
+ "attributes" : [
+ "object_name",
+ "model",
+ "mac_address",
+ "type",
+ "koren"
+ ],
+ "type" : "vnic"
+},
+{
+ "attributes" : [
+ "object_name",
+ "connector_type",
+ "type",
+ "interfaces"
+ ],
+ "type" : "vconnector"
+},
+{
+ "attributes" : [
+ "object_name",
+ "host",
+ "service_type",
+ "type"
+ ],
+ "type" : "vservice"
+},
+{
+ "attributes" : [
+ "object_name",
+ "host",
+ "agent_type",
+ "binary",
+ "type"
+ ],
+ "type" : "vedge"
+},
+{
+ "attributes" : [
+ "object_name",
+ "host",
+ "mac_address",
+ "Speed",
+ "Link detected",
+ "type"
+ ],
+ "type" : "pnic"
+},
+{
+ "attributes" : [
+ "object_name",
+ "provider:segmentation_id",
+ "provider:network_type",
+ "type"
+ ],
+ "type" : "network"
+},
+{
+ "attributes" : [
+ "object_name",
+ "host_type",
+ "parent_id",
+ "type"
+ ],
+ "type" : "host"
+},
+{
+ "attributes" : [
+ "object_name",
+ "host",
+ "project",
+ "type",
+ "name_path"
+ ],
+ "type" : "instance"
+},
+{
+ "attributes" : [
+ "object_name",
+ "overlay_type",
+ "ip_address",
+ "type",
+ "ports"
+ ],
+ "type" : "otep"
+}
+]
diff --git a/app/install/db/clique_constraints.json b/app/install/db/clique_constraints.json
new file mode 100644
index 0000000..e317c3d
--- /dev/null
+++ b/app/install/db/clique_constraints.json
@@ -0,0 +1,20 @@
+[
+{
+ "focal_point_type" : "instance",
+ "constraints" : [
+ "network"
+ ]
+},
+{
+ "focal_point_type" : "vservice",
+ "constraints" : [
+ "network"
+ ]
+},
+{
+ "constraints" : [
+ "network"
+ ],
+ "focal_point_type" : "network"
+}
+]
diff --git a/app/install/db/clique_types.json b/app/install/db/clique_types.json
new file mode 100644
index 0000000..a2ef3c2
--- /dev/null
+++ b/app/install/db/clique_types.json
@@ -0,0 +1,56 @@
+[
+{
+ "environment" : "ANY",
+ "focal_point_type" : "instance",
+ "link_types" : [
+ "instance-vnic",
+ "vnic-vconnector",
+ "vconnector-vedge",
+ "vedge-otep",
+ "otep-vconnector",
+ "vconnector-pnic",
+ "pnic-network"
+ ],
+ "name" : "instance"
+},
+{
+ "environment" : "ANY",
+ "focal_point_type" : "pnic",
+ "link_types" : [
+ "pnic-host",
+ "host-network",
+ "network-switch_pnic",
+ "switch_pnic-switch"
+ ],
+ "name" : "pnic_clique"
+},
+{
+ "environment" : "ANY",
+ "focal_point_type" : "vservice",
+ "link_types" : [
+ "vservice-vnic",
+ "vnic-vedge",
+ "vedge-otep",
+ "otep-vconnector",
+ "vconnector-pnic",
+ "pnic-network"
+ ],
+ "name" : "vservice"
+},
+{
+ "environment" : "ANY",
+ "focal_point_type" : "network",
+ "link_types" : [
+ "network-pnic",
+ "pnic-vconnector",
+ "vconnector-otep",
+ "otep-vedge",
+ "vedge-vconnector",
+ "vedge-vnic",
+ "vconnector-vnic",
+ "vnic-instance",
+ "vnic-vservice"
+ ],
+ "name" : "network"
+}
+]
diff --git a/app/install/db/cliques.json b/app/install/db/cliques.json
new file mode 100644
index 0000000..be99137
--- /dev/null
+++ b/app/install/db/cliques.json
@@ -0,0 +1,3 @@
+{
+ "_id" : "xyz"
+}
diff --git a/app/install/db/constants.json b/app/install/db/constants.json
new file mode 100644
index 0000000..0521d69
--- /dev/null
+++ b/app/install/db/constants.json
@@ -0,0 +1,668 @@
+[
+{
+ "data" : [
+ {
+ "label" : "network",
+ "value" : "network"
+ }
+ ],
+ "name" : "constraints"
+},
+{
+ "data" : [
+ {
+ "label" : "Development",
+ "value" : "development"
+ },
+ {
+ "label" : "Testing",
+ "value" : "testing"
+ },
+ {
+ "label" : "Staging",
+ "value" : "staging"
+ },
+ {
+ "label" : "Production",
+ "value" : "production"
+ }
+ ],
+ "name" : "env_types"
+},
+{
+ "data" : [
+ {
+ "label" : "CRITICAL",
+ "value" : "critical"
+ },
+ {
+ "label" : "ERROR",
+ "value" : "error"
+ },
+ {
+ "label" : "WARNING",
+ "value" : "warning"
+ },
+ {
+ "label" : "INFO",
+ "value" : "info"
+ },
+ {
+ "label" : "DEBUG",
+ "value" : "debug"
+ },
+ {
+ "label" : "NOTSET",
+ "value" : "notset"
+ }
+ ],
+ "name" : "log_levels"
+},
+{
+ "data" : [
+ {
+ "label" : "OVS",
+ "value" : "OVS"
+ },
+ {
+ "label" : "VPP",
+ "value" : "VPP"
+ },
+ {
+ "label" : "LXB",
+ "value" : "LXB"
+ },
+ {
+ "label" : "Arista",
+ "value" : "Arista"
+ },
+ {
+ "label" : "Nexus",
+ "value" : "Nexus"
+ }
+ ],
+ "name" : "mechanism_drivers"
+},
+{
+ "data" : [
+ {
+ "label" : "local",
+ "value" : "local"
+ },
+ {
+ "label" : "vlan",
+ "value" : "vlan"
+ },
+ {
+ "label" : "vxlan",
+ "value" : "vxlan"
+ },
+ {
+ "label" : "gre",
+ "value" : "gre"
+ },
+ {
+ "label" : "flat",
+ "value" : "flat"
+ }
+ ],
+ "name" : "type_drivers"
+},
+{
+ "data" : [
+ {
+ "label" : "Sensu",
+ "value" : "Sensu"
+ }
+ ],
+ "name" : "environment_monitoring_types"
+},
+{
+ "data" : [
+ {
+ "label" : "up",
+ "value" : "up"
+ },
+ {
+ "label" : "down",
+ "value" : "down"
+ }
+ ],
+ "name" : "link_states"
+},
+{
+ "name" : "environment_provision_types",
+ "data" : [
+ {
+ "label" : "None",
+ "value" : "None"
+ },
+ {
+ "label" : "Deploy",
+ "value" : "Deploy"
+ },
+ {
+ "label" : "Files",
+ "value" : "Files"
+ },
+ {
+ "label" : "DB",
+ "value" : "DB"
+ }
+ ]
+},
+{
+ "name" : "environment_operational_status",
+ "data" : [
+ {
+ "value" : "stopped",
+ "label" : "stopped"
+ },
+ {
+ "value" : "running",
+ "label" : "running"
+ },
+ {
+ "value" : "error",
+ "label" : "error"
+ }
+ ]
+},
+{
+ "name" : "link_types",
+ "data" : [
+ {
+ "label" : "instance-vnic",
+ "value" : "instance-vnic"
+ },
+ {
+ "label" : "otep-vconnector",
+ "value" : "otep-vconnector"
+ },
+ {
+ "label" : "otep-pnic",
+ "value" : "otep-pnic"
+ },
+ {
+ "label" : "pnic-network",
+ "value" : "pnic-network"
+ },
+ {
+ "label" : "vedge-otep",
+ "value" : "vedge-otep"
+ },
+ {
+ "label" : "vnic-vconnector",
+ "value" : "vnic-vconnector"
+ },
+ {
+ "label" : "vconnector-pnic",
+ "value" : "vconnector-pnic"
+ },
+ {
+ "label" : "vnic-vedge",
+ "value" : "vnic-vedge"
+ },
+ {
+ "label" : "vconnector-vedge",
+ "value" : "vconnector-vedge"
+ },
+ {
+ "label" : "vedge-pnic",
+ "value" : "vedge-pnic"
+ },
+ {
+ "label" : "vservice-vnic",
+ "value" : "vservice-vnic"
+ },
+ {
+ "label" : "pnic-host",
+ "value" : "pnic-host"
+ },
+ {
+ "label" : "host-pnic",
+ "value" : "host-pnic"
+ },
+ {
+ "label" : "host-network",
+ "value" : "host-network"
+ },
+ {
+ "label" : "network-host",
+ "value" : "network-host"
+ },
+ {
+ "label" : "switch_pnic-network",
+ "value" : "switch_pnic-network"
+ },
+ {
+ "label" : "network-switch_pnic",
+ "value" : "network-switch_pnic"
+ },
+ {
+ "label" : "switch_pnic-switch",
+ "value" : "switch_pnic-switch"
+ },
+ {
+ "label" : "switch-switch_pnic",
+ "value" : "switch-switch_pnic"
+ }
+ ]
+},
+{
+ "name" : "monitoring_sides",
+ "data" : [
+ {
+ "label" : "client",
+ "value" : "client"
+ },
+ {
+ "label" : "server",
+ "value" : "server"
+ }
+ ]
+},
+{
+ "name" : "messages_severity",
+ "data" : [
+ {
+ "label" : "panic",
+ "value" : "panic"
+ },
+ {
+ "label" : "alert",
+ "value" : "alert"
+ },
+ {
+ "label" : "crit",
+ "value" : "crit"
+ },
+ {
+ "label" : "error",
+ "value" : "error"
+ },
+ {
+ "label" : "warn",
+ "value" : "warn"
+ },
+ {
+ "label" : "notice",
+ "value" : "notice"
+ },
+ {
+ "label" : "info",
+ "value" : "info"
+ },
+ {
+ "label" : "debug",
+ "value" : "debug"
+ }
+ ]
+},
+{
+ "name" : "object_types",
+ "data" : [
+ {
+ "label" : "vnic",
+ "value" : "vnic"
+ },
+ {
+ "label" : "vconnector",
+ "value" : "vconnector"
+ },
+ {
+ "label" : "vedge",
+ "value" : "vedge"
+ },
+ {
+ "label" : "instance",
+ "value" : "instance"
+ },
+ {
+ "label" : "vservice",
+ "value" : "vservice"
+ },
+ {
+ "label" : "pnic",
+ "value" : "pnic"
+ },
+ {
+ "label" : "network",
+ "value" : "network"
+ },
+ {
+ "label" : "port",
+ "value" : "port"
+ },
+ {
+ "label" : "otep",
+ "value" : "otep"
+ },
+ {
+ "label" : "agent",
+ "value" : "agent"
+ },
+ {
+ "label" : "host",
+ "value" : "host"
+ },
+ {
+ "label" : "switch_pnic",
+ "value" : "switch_pnic"
+ },
+ {
+ "label" : "switch",
+ "value" : "switch"
+ }
+ ]
+},
+{
+ "name" : "scans_statuses",
+ "data" : [
+ {
+ "value" : "draft",
+ "label" : "Draft"
+ },
+ {
+ "value" : "pending",
+ "label" : "Pending"
+ },
+ {
+ "value" : "running",
+ "label" : "Running"
+ },
+ {
+ "value" : "completed",
+ "label" : "Completed"
+ },
+ {
+ "value" : "failed",
+ "label" : "Failed"
+ },
+ {
+ "value" : "aborted",
+ "label" : "Aborted"
+ }
+ ]
+},
+{
+ "data" : [
+ {
+ "label" : "Mirantis-6.0",
+ "value" : "Mirantis-6.0"
+ },
+ {
+ "label" : "Mirantis-7.0",
+ "value" : "Mirantis-7.0"
+ },
+ {
+ "label" : "Mirantis-8.0",
+ "value" : "Mirantis-8.0"
+ },
+ {
+ "label" : "Mirantis-9.0",
+ "value" : "Mirantis-9.0"
+ },
+ {
+ "label" : "RDO-Mitaka",
+ "value" : "RDO-Mitaka"
+ },
+ {
+ "label" : "RDO-Liberty",
+ "value" : "RDO-Liberty"
+ },
+ {
+ "label" : "RDO-Juno",
+ "value" : "RDO-Juno"
+ },
+ {
+ "label" : "RDO-kilo",
+ "value" : "RDO-kilo"
+ },
+ {
+ "label" : "devstack-liberty",
+ "value" : "devstack-liberty"
+ },
+ {
+ "label" : "Canonical-icehouse",
+ "value" : "Canonical-icehouse"
+ },
+ {
+ "label" : "Canonical-juno",
+ "value" : "Canonical-juno"
+ },
+ {
+ "label" : "Canonical-liberty",
+ "value" : "Canonical-liberty"
+ },
+ {
+ "label" : "Canonical-mitaka",
+ "value" : "Canonical-mitaka"
+ },
+ {
+ "label" : "Apex-Mitaka",
+ "value" : "Apex-Mitaka"
+ },
+ {
+ "label" : "Devstack-Mitaka",
+ "value" : "Devstack-Mitaka"
+ },
+ {
+ "label" : "packstack-7.0.0-0.10.dev1682",
+ "value" : "packstack-7.0.0-0.10.dev1682"
+ },
+ {
+ "label" : "Stratoscale-v2.1.6",
+ "value" : "Stratoscale-v2.1.6"
+ },
+ {
+ "label" : "Mirantis-9.1",
+ "value" : "Mirantis-9.1"
+ }
+ ],
+ "name" : "distributions"
+},
+{
+ "name" : "message_source_systems",
+ "data" : [
+ {
+ "value" : "OpenStack",
+ "label" : "OpenStack"
+ },
+ {
+ "value" : "Calipso",
+ "label" : "Calipso"
+ },
+ {
+ "value" : "Sensu",
+ "label" : "Sensu"
+ }
+ ]
+},
+{
+ "name" : "object_types_for_links",
+ "data" : [
+ {
+ "label" : "vnic",
+ "value" : "vnic"
+ },
+ {
+ "label" : "vconnector",
+ "value" : "vconnector"
+ },
+ {
+ "label" : "vedge",
+ "value" : "vedge"
+ },
+ {
+ "label" : "instance",
+ "value" : "instance"
+ },
+ {
+ "label" : "vservice",
+ "value" : "vservice"
+ },
+ {
+ "label" : "pnic",
+ "value" : "pnic"
+ },
+ {
+ "label" : "network",
+ "value" : "network"
+ },
+ {
+ "label" : "port",
+ "value" : "port"
+ },
+ {
+ "label" : "otep",
+ "value" : "otep"
+ },
+ {
+ "label" : "agent",
+ "value" : "agent"
+ },
+ {
+ "label" : "host",
+ "value" : "host"
+ },
+ {
+ "label" : "switch_pnic",
+ "value" : "switch_pnic"
+ },
+ {
+ "label" : "switch",
+ "value" : "switch"
+ }
+ ]
+},
+{
+ "name" : "scan_object_types",
+ "data" : [
+ {
+ "label" : "vnic",
+ "value" : "vnic"
+ },
+ {
+ "label" : "vconnector",
+ "value" : "vconnector"
+ },
+ {
+ "label" : "vedge",
+ "value" : "vedge"
+ },
+ {
+ "label" : "instance",
+ "value" : "instance"
+ },
+ {
+ "label" : "vservice",
+ "value" : "vservice"
+ },
+ {
+ "label" : "pnic",
+ "value" : "pnic"
+ },
+ {
+ "label" : "network",
+ "value" : "network"
+ },
+ {
+ "label" : "port",
+ "value" : "port"
+ },
+ {
+ "label" : "otep",
+ "value" : "otep"
+ },
+ {
+ "label" : "agent",
+ "value" : "agent"
+ },
+ {
+ "value" : "availability_zone",
+ "label" : "availability_zone"
+ },
+ {
+ "value" : "regions_folder",
+ "label" : "regions_folder"
+ },
+ {
+ "value" : "instances_folder",
+ "label" : "instances_folder"
+ },
+ {
+ "value" : "pnics_folder",
+ "label" : "pnics_folder"
+ },
+ {
+ "value" : "vconnectors_folder",
+ "label" : "vconnectors_folder"
+ },
+ {
+ "value" : "vedges_folder",
+ "label" : "vedges_folder"
+ },
+ {
+ "value" : "ports_folder",
+ "label" : "ports_folder"
+ },
+ {
+ "value" : "aggregates_folder",
+ "label" : "aggregates_folder"
+ },
+ {
+ "value" : "vservices_folder",
+ "label" : "vservices_folder"
+ },
+ {
+ "value" : "vnics_folder",
+ "label" : "vnics_folder"
+ },
+ {
+ "value" : "network_agent",
+ "label" : "network_agent"
+ },
+ {
+ "value" : "project",
+ "label" : "project"
+ },
+ {
+ "value" : "projects_folder",
+ "label" : "projects_folder"
+ },
+ {
+ "value" : "aggregate",
+ "label" : "aggregate"
+ },
+ {
+ "value" : "network_agents_folder",
+ "label" : "network_agents_folder"
+ },
+ {
+ "value" : "host",
+ "label" : "host"
+ },
+ {
+ "value" : "region",
+ "label" : "region"
+ },
+ {
+ "value" : "host_ref",
+ "label" : "host_ref"
+ },
+ {
+ "value" : "network_services_folder",
+ "label" : "network_services_folder"
+ },
+ {
+ "label" : "switch_pnic",
+ "value" : "switch_pnic"
+ },
+ {
+ "label" : "switch",
+ "value" : "switch"
+ }
+ ]
+}
+]
diff --git a/app/install/db/environments_config.json b/app/install/db/environments_config.json
new file mode 100644
index 0000000..9e05687
--- /dev/null
+++ b/app/install/db/environments_config.json
@@ -0,0 +1,78 @@
+[
+{
+ "operational" : "stopped",
+ "listen" : true,
+ "configuration" : [
+ {
+ "name" : "OpenStack",
+ "admin_token" : "sadgsgsagsa",
+ "user" : "adminuser",
+ "port" : 5000,
+ "pwd" : "saggsgsasg",
+ "host" : "10.0.0.1"
+ },
+ {
+ "name" : "mysql",
+ "password" : "sgasdggddsgsd",
+ "port" : 3307,
+ "user" : "mysqluser",
+ "host" : "10.0.0.1"
+ },
+ {
+ "name" : "CLI",
+ "user" : "sshuser",
+ "pwd" : "sagsagsagsa",
+ "host" : "10.0.0.1"
+ },
+ {
+ "name" : "AMQP",
+ "password" : "sagssdgassgd",
+ "port" : 5673,
+ "user" : "rabbitmquser",
+ "host" : "10.0.0.1"
+ },
+ {
+ "rabbitmq_port" : 5671,
+ "ssh_user" : "root",
+ "server_name" : "sensu_server",
+ "env_type" : "production",
+ "provision" : "None",
+ "name" : "Monitoring",
+ "ssh_port" : 20022,
+ "rabbitmq_pass" : "sagsagss",
+ "ssh_password" : "calipsoasgsagdg",
+ "rabbitmq_user" : "sensu",
+ "config_folder" : "/local_dir/sensu_config",
+ "type" : "Sensu",
+ "server_ip" : "10.0.0.1",
+ "api_port" : 4567
+ },
+ {
+ "name" : "ACI",
+ "user" : "admin",
+ "pwd" : "Cisco123456",
+ "host" : "10.1.1.104"
+ }
+ ],
+ "enable_monitoring" : true,
+ "name" : "DEMO-ENVIRONMENT-SCHEME",
+ "distribution" : "Mirantis-8.0",
+ "last_scanned" : "filled-by-scanning",
+ "app_path" : "/home/scan/calipso_prod/app",
+ "scanned" : false,
+ "type_drivers" : "vxlan",
+ "mechanism_drivers" : [
+ "OVS"
+ ],
+ "user" : "wNLeBJxNDyw8G7Ssg",
+ "auth" : {
+ "edit-env" : [
+ "wNLeBJxNDyw8G7Ssg"
+ ],
+ "view-env" : [
+ "wNLeBJxNDyw8G7Ssg"
+ ]
+ },
+ "type" : "environment"
+}
+]
diff --git a/app/install/db/inventory.json b/app/install/db/inventory.json
new file mode 100644
index 0000000..be99137
--- /dev/null
+++ b/app/install/db/inventory.json
@@ -0,0 +1,3 @@
+{
+ "_id" : "xyz"
+}
diff --git a/app/install/db/link_types.json b/app/install/db/link_types.json
new file mode 100644
index 0000000..30a610c
--- /dev/null
+++ b/app/install/db/link_types.json
@@ -0,0 +1,184 @@
+[
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "instance-vnic",
+ "endPointA" : "instance",
+ "endPointB" : "vnic",
+ "type" : "instance-vnic"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vnic-vconnector",
+ "endPointA" : "vnic",
+ "endPointB" : "vconnector",
+ "type" : "vnic-vconnector"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vconnector-vedge",
+ "endPointA" : "vconnector",
+ "endPointB" : "vedge",
+ "type" : "vconnector-vedge"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vedge-otep",
+ "endPointA" : "vedge",
+ "endPointB" : "otep",
+ "type" : "vedge-otep"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vconnector-pnic",
+ "endPointA" : "vconnector",
+ "endPointB" : "pnic",
+ "type" : "vconnector-pnic"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "pnic-network",
+ "endPointA" : "pnic",
+ "endPointB" : "network",
+ "type" : "pnic-network"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "otep-vconnector",
+ "endPointA" : "otep",
+ "endPointB" : "vconnector",
+ "type" : "otep-vconnector"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vnic-vedge",
+ "endPointA" : "vnic",
+ "endPointB" : "vedge",
+ "type" : "vnic-vedge"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "network-pnic",
+ "endPointA" : "network",
+ "endPointB" : "pnic",
+ "type" : "network-pnic"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vedge-vconnector",
+ "endPointA" : "vedge",
+ "endPointB" : "vconnector",
+ "type" : "vedge-vconnector"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vconnector-vnic",
+ "endPointA" : "vconnector",
+ "endPointB" : "vnic",
+ "type" : "vconnector-vnic"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vnic-instance",
+ "endPointA" : "vnic",
+ "endPointB" : "instance",
+ "type" : "vnic-instance"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vnic-vservice",
+ "endPointA" : "vnic",
+ "endPointB" : "vservice",
+ "type" : "vnic-vservice"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vservice-vnic",
+ "endPointA" : "vservice",
+ "endPointB" : "vnic",
+ "type" : "vservice-vnic"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "pnic-vconnector",
+ "endPointA" : "pnic",
+ "endPointB" : "vconnector",
+ "type" : "pnic-vconnector"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vconnector-otep",
+ "endPointA" : "vconnector",
+ "endPointB" : "otep",
+ "type" : "vconnector-otep"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "otep-vedge",
+ "endPointA" : "otep",
+ "endPointB" : "vedge",
+ "type" : "otep-vedge"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vedge-vnic",
+ "endPointA" : "vedge",
+ "endPointB" : "vnic",
+ "type" : "vedge-vnic"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "pnic-host",
+ "endPointA" : "pnic",
+ "endPointB" : "host",
+ "type" : "pnic-host"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "host-pnic",
+ "endPointA" : "host",
+ "endPointB" : "pnic",
+ "type" : "host-pnic"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "host-network",
+ "endPointA" : "host",
+ "endPointB" : "network",
+ "type" : "host-network"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "network-host",
+ "endPointA" : "network",
+ "endPointB" : "host",
+ "type" : "network-host"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "network-switch_pnic",
+ "endPointA" : "network",
+ "endPointB" : "switch_pnic",
+ "type" : "network-switch_pnic"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "switch_pnic-network",
+ "endPointA" : "switch_pnic",
+ "endPointB" : "network",
+ "type" : "switch_pnic-network"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "switch_pnic-switch",
+ "endPointA" : "switch_pnic",
+ "endPointB" : "switch",
+ "type" : "switch_pnic-switch"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "switch-switch_pnic",
+ "endPointA" : "switch",
+ "endPointB" : "switch_pnic",
+ "type" : "switch-switch_pnic"
+}
+]
diff --git a/app/install/db/links.json b/app/install/db/links.json
new file mode 100644
index 0000000..be99137
--- /dev/null
+++ b/app/install/db/links.json
@@ -0,0 +1,3 @@
+{
+ "_id" : "xyz"
+}
diff --git a/app/install/db/messages.json b/app/install/db/messages.json
new file mode 100644
index 0000000..b6c27cc
--- /dev/null
+++ b/app/install/db/messages.json
@@ -0,0 +1,44 @@
+[
+{
+ "message" : "2017-07-03 09:16:47,563 INFO: scanning : type=vnic, parent: (type=vnics_folder, name=vNICs, id=72bda9ec-2d0d-424c-8558-88ec22f09c95-vnics)",
+ "related_object" : null,
+ "received_timestamp" : null,
+ "finished_timestamp" : null,
+ "level" : "info",
+ "display_context" : null,
+ "viewed" : false,
+ "id" : "17350.33407.563988",
+ "source_system" : "CALIPSO",
+ "related_object_type" : null,
+ "timestamp" : "2017-07-03T09:16:47.563988",
+ "environment" : null
+},
+{
+ "related_object_type" : null,
+ "finished_timestamp" : null,
+ "environment" : null,
+ "received_timestamp" : null,
+ "related_object" : null,
+ "source_system" : "CALIPSO",
+ "message" : "2017-07-04 13:38:41,431 INFO: Started EventManager with following configuration:\nMongo config file path: /local_dir/calipso_mongo_access.conf\nCollection: environments_config\nPolling interval: 5 second(s)",
+ "timestamp" : "2017-07-04T13:38:41.431470",
+ "id" : "17351.49121.431470",
+ "level" : "info",
+ "viewed" : false,
+ "display_context" : null
+},
+{
+ "display_context" : null,
+ "level" : "info",
+ "id" : "17351.49126.596498",
+ "environment" : null,
+ "finished_timestamp" : null,
+ "viewed" : false,
+ "message" : "2017-07-04 13:38:46,596 INFO: Started ScanManager with following configuration:\nMongo config file path: /local_dir/calipso_mongo_access.conf\nScans collection: scans\nEnvironments collection: environments_config\nPolling interval: 1 second(s)",
+ "timestamp" : "2017-07-04T13:38:46.596498",
+ "related_object" : null,
+ "received_timestamp" : null,
+ "related_object_type" : null,
+ "source_system" : "CALIPSO"
+}
+]
diff --git a/app/install/db/meteor_accounts_loginServiceConfiguration.json b/app/install/db/meteor_accounts_loginServiceConfiguration.json
new file mode 100644
index 0000000..be99137
--- /dev/null
+++ b/app/install/db/meteor_accounts_loginServiceConfiguration.json
@@ -0,0 +1,3 @@
+{
+ "_id" : "xyz"
+}
diff --git a/app/install/db/monitoring_config.json b/app/install/db/monitoring_config.json
new file mode 100644
index 0000000..be99137
--- /dev/null
+++ b/app/install/db/monitoring_config.json
@@ -0,0 +1,3 @@
+{
+ "_id" : "xyz"
+}
diff --git a/app/install/db/monitoring_config_templates.json b/app/install/db/monitoring_config_templates.json
new file mode 100644
index 0000000..2e6d9ba
--- /dev/null
+++ b/app/install/db/monitoring_config_templates.json
@@ -0,0 +1,378 @@
+[
+{
+ "side" : "client",
+ "order" : "1",
+ "config" : {
+ "rabbitmq" : {
+ "port" : "{rabbitmq_port}",
+ "vhost" : "/sensu",
+ "password" : "{rabbitmq_pass}",
+ "host" : "{server_ip}",
+ "user" : "{rabbitmq_user}",
+ "ssl" : {
+ "cert_chain_file" : "/etc/sensu/ssl/cert.pem",
+ "private_key_file" : "/etc/sensu/ssl/key.pem"
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "rabbitmq.json"
+},
+{
+ "side" : "client",
+ "order" : "1",
+ "config" : {
+ "transport" : {
+ "name" : "rabbitmq",
+ "reconnect_on_error" : true
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "transport.json"
+},
+{
+ "side" : "server",
+ "order" : "1",
+ "config" : {
+ "redis" : {
+ "port" : "6379",
+ "host" : "127.0.0.1"
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "redis.json"
+},
+{
+ "side" : "client",
+ "order" : "1",
+ "config" : {
+ "api" : {
+ "port" : 4567,
+ "host" : "{server_ip}"
+ },
+ "client" : {
+ "address" : "{client_name}",
+ "subscriptions" : [
+
+ ],
+ "name" : "{client_name}",
+ "environment" : "{env_name}"
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client.json"
+},
+{
+ "side" : "server",
+ "order" : "1",
+ "config" : {
+ "transport" : {
+ "name" : "rabbitmq",
+ "reconnect_on_error" : true
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "transport.json"
+},
+{
+ "side" : "client",
+ "order" : "1",
+ "config" : {
+ "checks" : {
+ "{objtype}_{objid}_{portid}" : {
+ "interval" : 15,
+ "command" : "check_ping.py -c 10 -i 0.5 -p 4f532d444e41 -w 10 -s 256 -f {otep_src_ip} -t {otep_dest_ip} -W 1%/301.11/600 -C 10%/1020.12/2000",
+ "standalone" : true,
+ "type": "metric",
+ "subscribers" : [
+ "base"
+ ],
+ "handlers" : [
+ "default",
+ "file",
+ "osdna-monitor"
+ ]
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client_check_otep.json"
+},
+{
+ "side" : "server",
+ "order" : "1",
+ "config" : {
+ "rabbitmq" : {
+ "port" : "{rabbitmq_port}",
+ "vhost" : "/sensu",
+ "password" : "{rabbitmq_pass}",
+ "host" : "{server_ip}",
+ "user" : "{rabbitmq_user}",
+ "ssl" : {
+ "cert_chain_file" : "/etc/sensu/ssl/cert.pem",
+ "private_key_file" : "/etc/sensu/ssl/key.pem"
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "rabbitmq.json"
+},
+{
+ "side" : "server",
+ "order" : "1",
+ "config" : {
+ "api" : {
+ "port" : 4567,
+ "host" : "{server_ip}",
+ "bind" : "0.0.0.0"
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "api.json"
+},
+{
+ "side" : "server",
+ "order" : "1",
+ "config" : {
+ "client" : {
+ "address" : "sensu-server",
+ "socket" : {
+ "port" : 3030,
+ "bind" : "127.0.0.1"
+ },
+ "subscriptions" : [
+ "dev",
+ "base",
+ "test"
+ ],
+ "name" : "{server_name}",
+ "environment" : "{env_type}"
+ },
+ "keepalive" : {
+ "handlers" : [
+ "default",
+ "file"
+ ]
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client.json"
+},
+{
+ "side" : "server",
+ "order" : "1",
+ "config" : {
+ "filters" : {
+ "state_change_only" : {
+ "negate" : true,
+ "attributes" : {
+ "check" : {
+ "history" : "eval: value.last == value[-2]"
+ }
+ }
+
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "filters.json"
+},
+{
+ "side" : "server",
+ "order" : "1",
+ "config" : {
+ "handlers" : {
+ "osdna-monitor" : {
+ "timeout" : 20,
+ "command" : "PYTHONPATH={app_path} {app_path}/monitoring/handlers/monitor.py -m /local_dir/calipso_mongo_access.conf",
+ "type" : "pipe",
+ "filter" : "state_change_only"
+ },
+ "file" : {
+ "timeout" : 20,
+ "command" : "/etc/sensu/plugins/event-file.rb",
+ "type" : "pipe",
+ "filter" : "state_change_only"
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "handlers.json"
+},
+{
+ "type" : "client_check_vedge.json",
+ "side" : "client",
+ "condition" : {
+ "mechanism_drivers" : [
+ "VPP"
+ ]
+ },
+ "config" : {
+ "checks" : {
+ "{objtype}_{objid}" : {
+ "interval" : 15,
+ "command" : "check_vedge_vpp.py",
+ "standalone" : true,
+ "type": "metric",
+ "subscribers" : [
+ "base"
+ ],
+ "handlers" : [
+ "default",
+ "file",
+ "osdna-monitor"
+ ]
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "order" : "1"
+},
+{
+ "side" : "client",
+ "order" : "1",
+ "condition" : {
+ "mechanism_drivers" : [
+ "VPP"
+ ]
+ },
+ "config" : {
+ "checks" : {
+ "{objtype}_{vnictype}_{objid}" : {
+ "interval" : 15,
+ "command" : "check_vnic_vpp.py",
+ "standalone" : true,
+ "type": "metric",
+ "subscribers" : [
+ "base"
+ ],
+ "handlers" : [
+ "default",
+ "file",
+ "osdna-monitor"
+ ]
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client_check_vnic.json"
+},
+{
+ "side" : "client",
+ "config" : {
+ "checks" : {
+ "{objtype}_{objid}" : {
+ "interval" : 15,
+ "command" : "check_vedge_ovs.py",
+ "standalone" : true,
+ "type": "metric",
+ "subscribers" : [
+ "base"
+ ],
+ "handlers" : [
+ "default",
+ "file",
+ "osdna-monitor"
+ ]
+ }
+ }
+ },
+ "type" : "client_check_vedge.json",
+ "condition" : {
+ "mechanism_drivers" : [
+ "OVS"
+ ]
+ },
+ "monitoring_system" : "sensu",
+ "order" : "1"
+},
+{
+ "side" : "client",
+ "order" : "1",
+ "condition" : {
+ "mechanism_drivers" : [
+ "OVS",
+ "LXB"
+ ]
+ },
+ "config" : {
+ "checks" : {
+ "link_{linktype}_{fromobjid}_{toobjid}" : {
+ "interval" : 15,
+ "command" : "check_vnic_vconnector.py {bridge} {mac_address}",
+ "standalone" : true,
+ "type": "metric",
+ "subscribers" : [
+ "base"
+ ],
+ "handlers" : [
+ "default",
+ "file",
+ "osdna-monitor"
+ ]
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client_check_link_vnic-vconnector.json"
+},
+{
+ "side" : "client",
+ "order" : "1",
+ "condition" : {
+ "mechanism_drivers" : [
+ "VPP"
+ ]
+ },
+ "config" : {
+ "checks" : {
+ "{objtype}_{objid}" : {
+ "interval" : 15,
+ "command" : "check_pnic_vpp.py",
+ "standalone" : true,
+ "type": "metric",
+ "subscribers" : [
+ "base"
+ ],
+ "handlers" : [
+ "default",
+ "file",
+ "osdna-monitor"
+ ]
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client_check_pnic.json"
+},
+{
+ "side" : "client",
+ "order" : "1",
+ "condition" : {
+ "mechanism_drivers" : [
+ "OVS",
+ "LXB"
+ ]
+ },
+ "config" : {
+ "checks" : {
+ "{objtype}_{objid}" : {
+ "interval" : 15,
+ "command" : "PYTHONPATH=/etc/sensu/plugins check_vservice.py {service_type} {local_service_id}",
+ "standalone" : true,
+ "type": "metric",
+ "subscribers" : [
+ "base"
+ ],
+ "handlers" : [
+ "default",
+ "file",
+ "osdna-monitor"
+ ]
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client_check_vservice.json"
+}
+]
diff --git a/app/install/db/network_agent_types.json b/app/install/db/network_agent_types.json
new file mode 100644
index 0000000..ba01ef2
--- /dev/null
+++ b/app/install/db/network_agent_types.json
@@ -0,0 +1,52 @@
+[
+{
+ "folder_text" : "VPNs",
+ "description" : "VPN agent",
+ "type" : "vpn"
+},
+{
+ "folder_text" : "Firewalls",
+ "description" : "Firewall agent",
+ "type" : "firewall"
+},
+{
+ "folder_text" : "vEdges",
+ "description" : "L2 agent",
+ "type" : "vedge"
+},
+{
+ "folder_text" : "Gateways",
+ "description" : "L3 agent",
+ "type" : "router"
+},
+{
+ "folder_text" : "Metadata",
+ "description" : "Metadata agent",
+ "type" : "metadata"
+},
+{
+ "folder_text" : "Load-Balancers",
+ "description" : "Load Balancing agent",
+ "type" : "load_balancer"
+},
+{
+ "folder_text" : "vEdges",
+ "description" : "Open vSwitch agent",
+ "type" : "vedge"
+},
+{
+ "folder_text" : "vConnectors",
+ "description" : "Linux bridge agent",
+ "type" : "vconnector"
+},
+{
+ "folder_text" : "DHCP servers",
+ "description" : "DHCP agent",
+ "type" : "dhcp"
+},
+{
+ "folder_text" : "Orchestrators",
+ "description" : "Orchestrator",
+ "type" : "orchestrator"
+}
+]
diff --git a/app/install/db/roles.json b/app/install/db/roles.json
new file mode 100644
index 0000000..78a7c75
--- /dev/null
+++ b/app/install/db/roles.json
@@ -0,0 +1,26 @@
+[
+{
+ "_id" : "z5W5wQqTnSHMYxXRB",
+ "name" : "manage-users"
+},
+{
+ "_id" : "q3o6nbR3xY88KyCkt",
+ "name" : "manage-link-types"
+},
+{
+ "_id" : "odxGpkgQ3oexmXYhc",
+ "name" : "manage-clique-types"
+},
+{
+ "_id" : "RcEgj6kJnkPqpyJzR",
+ "name" : "manage-clique-constraints"
+},
+{
+ "_id" : "7gs3Fyow2Cryc4cdf",
+ "name" : "view-env"
+},
+{
+ "_id" : "DyhzqcNymdYgkzyie",
+ "name" : "edit-env"
+}
+]
diff --git a/app/install/db/scans.json b/app/install/db/scans.json
new file mode 100644
index 0000000..cb480d2
--- /dev/null
+++ b/app/install/db/scans.json
@@ -0,0 +1,24 @@
+[
+{
+ "status" : "completed",
+ "inventory" : "inventory",
+ "start_timestamp" : "2017-05-17T11:00:17.939+0000",
+ "scan_only_inventory" : false,
+ "scan_only_links" : false,
+ "submit_timestamp" : "2017-05-17T07:53:09.194+0000",
+ "log_level" : "warning",
+ "scan_only_cliques" : false,
+ "clear" : true,
+ "environment" : "Mirantis-Liberty"
+},
+{
+ "status" : "failed",
+ "scan_only_inventory" : false,
+ "scan_only_links" : false,
+ "submit_timestamp" : "2017-06-14T13:42:32.710+0000",
+ "log_level" : "info",
+ "scan_only_cliques" : false,
+ "clear" : true,
+ "environment" : "staging"
+}
+]
diff --git a/app/install/db/scheduled_scans.json b/app/install/db/scheduled_scans.json
new file mode 100644
index 0000000..8f0c516
--- /dev/null
+++ b/app/install/db/scheduled_scans.json
@@ -0,0 +1,43 @@
+[
+{
+ "clear" : true,
+ "environment" : "staging",
+ "freq" : "WEEKLY",
+ "log_level" : "warning",
+ "scan_only_cliques" : false,
+ "scan_only_inventory" : true,
+ "scan_only_links" : false,
+ "submit_timestamp" : "2017-07-02T13:46:29.206+0000"
+},
+{
+ "clear" : false,
+ "environment" : "staging",
+ "freq" : "WEEKLY",
+ "log_level" : "warning",
+ "scan_only_cliques" : false,
+ "scan_only_inventory" : true,
+ "scan_only_links" : false,
+ "submit_timestamp" : "2017-07-02T13:46:36.079+0000"
+},
+{
+ "clear" : false,
+ "environment" : "staging",
+ "freq" : "WEEKLY",
+ "log_level" : "warning",
+ "scan_only_cliques" : false,
+ "scan_only_inventory" : true,
+ "scan_only_links" : false,
+ "submit_timestamp" : "2017-07-02T13:46:41.004+0000"
+},
+{
+ "scan_only_inventory" : true,
+ "freq" : "WEEKLY",
+ "scan_only_links" : false,
+ "scan_only_cliques" : false,
+ "log_level" : "warning",
+ "environment" : "staging",
+ "submit_timestamp" : "2017-07-02T14:38:09.032+0000",
+ "scheduled_timestamp" : "2017-07-08T14:38:09.032+0000",
+ "clear" : true
+}
+]
diff --git a/app/install/db/statistics.json b/app/install/db/statistics.json
new file mode 100644
index 0000000..195e0ec
--- /dev/null
+++ b/app/install/db/statistics.json
@@ -0,0 +1,23 @@
+[
+{
+ "recordGenerationMillis" : 1487147277000,
+ "object_id" : "devstack-vpp2-VPP",
+ "sample_time" : "2017-02-15T08:15:10Z",
+ "ingressInterface" : 2,
+ "averageArrivalNanoSeconds" : 1487146510773984049,
+ "destinationMacAddress" : "fa:16:3e:5d:7b:ae",
+ "destination" : "iperf2",
+ "packetCount" : 2,
+ "source" : "iperf1",
+ "object_type" : "vedge",
+ "sourceMacAddress" : "fa:16:3e:58:64:c6",
+ "ethernetType" : 2048,
+ "egressInterface" : 3,
+ "environment" : "Devstack-VPP",
+ "flowType" : "L2",
+ "data_arrival_avg" : 1487146510,
+ "type" : "vedge_flows",
+ "averageThroughput" : 17280,
+ "hostIp" : "10.56.20.78"
+}
+]
diff --git a/app/install/db/supported_environments.json b/app/install/db/supported_environments.json
new file mode 100644
index 0000000..1214ef3
--- /dev/null
+++ b/app/install/db/supported_environments.json
@@ -0,0 +1,230 @@
+[
+{
+ "environment" : {
+ "distribution" : "Stratoscale-v2.1.6",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : false,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-6.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-7.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-8.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-9.1",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : false,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "RDO-Mitaka",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "RDO-Liberty",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-9.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-9.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-8.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-6.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-7.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-9.1",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "RDO-Mitaka",
+ "mechanism_drivers" : "VPP",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "RDO-Mitaka",
+ "mechanism_drivers" : "VPP",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Devstack-Mitaka",
+ "mechanism_drivers" : "VPP",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Devstack-Mitaka",
+ "mechanism_drivers" : "VPP",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "RDO-Mitaka",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "RDO-Liberty",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+}
+]
diff --git a/app/install/db/users.json b/app/install/db/users.json
new file mode 100644
index 0000000..a0ccdb3
--- /dev/null
+++ b/app/install/db/users.json
@@ -0,0 +1,51 @@
+[
+{
+ "_id" : "wNLeBJxNDyw8G7Ssg",
+ "createdAt" : "2017-06-22T18:20:43.963+0000",
+ "services" : {
+ "password" : {
+ "bcrypt" : "$2a$10$LRnem9Y5OIo0hYpqi8JY5.G7uMp1LKAyHiq.ha2xHWmbPRo7CJuUS"
+ },
+ "resume" : {
+ "loginTokens" : [
+ {
+ "when" : "2017-06-29T16:41:06.183+0000",
+ "hashedToken" : "x2WrmQXpX6slx1/y+ReTuTZAqmPFMCX1ZI+N9COkK7c="
+ },
+ {
+ "when" : "2017-07-02T08:22:34.591+0000",
+ "hashedToken" : "Ls5V0a2I90A5TWWYU8kIZ5ByJR/fK8Kt5R65ch/iPt8="
+ },
+ {
+ "when" : "2017-07-02T15:02:09.357+0000",
+ "hashedToken" : "tEHXx9BGQUATbrHEaQyXm693Dfe5mzf8QPM9zpGnykE="
+ },
+ {
+ "when" : "2017-07-03T06:34:38.405+0000",
+ "hashedToken" : "WiI6vfcQ9zAnMN8SqBmfrF14ndBVLAQzhpsf9DZarRA="
+ }
+ ]
+ }
+ },
+ "username" : "admin",
+ "emails" : [
+ {
+ "address" : "admin@example.com",
+ "verified" : false
+ }
+ ],
+ "profile" : {
+ "name" : "admin"
+ },
+ "roles" : {
+ "__global_roles__" : [
+ "manage-users",
+ "manage-link-types",
+ "manage-clique-types",
+ "manage-clique-constraints",
+ "view-env",
+ "edit-env"
+ ]
+ }
+}
+]
diff --git a/app/install/ldap.conf.example b/app/install/ldap.conf.example
new file mode 100644
index 0000000..b1798f7
--- /dev/null
+++ b/app/install/ldap.conf.example
@@ -0,0 +1,10 @@
+user admin
+password password
+url ldap://korlev-calipso-dev.cisco.com:389
+user_id_attribute CN
+user_pass_attribute userpassword
+user_objectclass inetOrgPerson
+user_tree_dn OU=Users,DC=openstack,DC=org
+query_scope one
+tls_req_cert allow
+group_member_attribute member
diff --git a/app/messages/message.py b/app/messages/message.py
new file mode 100644
index 0000000..03c9069
--- /dev/null
+++ b/app/messages/message.py
@@ -0,0 +1,65 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from typing import Union
+
+from bson import ObjectId
+
+
+class Message:
+
+ LEVELS = ['info', 'warn', 'error']
+ DEFAULT_LEVEL = LEVELS[0]
+
+ def __init__(self,
+ msg_id: str,
+ msg: dict,
+ source: str,
+ env: str = None,
+ object_id: Union[str, ObjectId] = None,
+ display_context: Union[str, ObjectId] = None,
+ level: str = DEFAULT_LEVEL,
+ object_type: str = None,
+ ts: str = None,
+ received_ts: str = None,
+ finished_ts: str = None):
+ super().__init__()
+
+ if level and level.lower() in self.LEVELS:
+ self.level = level.lower()
+ else:
+ self.level = self.DEFAULT_LEVEL
+
+ self.id = msg_id
+ self.environment = env
+ self.source_system = source
+ self.related_object = object_id
+ self.related_object_type = object_type
+ self.display_context = display_context
+ self.message = msg
+ self.timestamp = ts if ts else received_ts
+ self.received_timestamp = received_ts
+ self.finished_timestamp = finished_ts
+ self.viewed = False
+
+ def get(self):
+ return {
+ "id": self.id,
+ "environment": self.environment,
+ "source_system": self.source_system,
+ "related_object": self.related_object,
+ "related_object_type": self.related_object_type,
+ "display_context": self.display_context,
+ "level": self.level,
+ "message": self.message,
+ "timestamp": self.timestamp,
+ "received_timestamp": self.received_timestamp,
+ "finished_timestamp": self.finished_timestamp,
+ "viewed": self.viewed
+ }
diff --git a/app/monitoring/__init__.py b/app/monitoring/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/monitoring/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/monitoring/checks/binary_converter.py b/app/monitoring/checks/binary_converter.py
new file mode 100644
index 0000000..4da1107
--- /dev/null
+++ b/app/monitoring/checks/binary_converter.py
@@ -0,0 +1,17 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+def binary2str(txt):
+ if not isinstance(txt, bytes):
+ return str(txt)
+ try:
+ s = txt.decode("utf-8")
+ except TypeError:
+ s = str(txt)
+ return s
diff --git a/app/monitoring/checks/check_interface.py b/app/monitoring/checks/check_interface.py
new file mode 100755
index 0000000..4140dfe
--- /dev/null
+++ b/app/monitoring/checks/check_interface.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+import re
+import sys
+import subprocess
+
+from binary_converter import binary2str
+
+
+if len(sys.argv) < 2:
+ print('name of interface must be specified')
+ exit(2)
+nic_name = str(sys.argv[1])
+
+rc = 0
+
+try:
+ out = subprocess.check_output(["ifconfig " + nic_name],
+ stderr=subprocess.STDOUT,
+ shell=True)
+ out = binary2str(out)
+ lines = out.splitlines()
+ line_number = 1
+ line = ''
+ while line_number < len(lines):
+ line = lines[line_number]
+ if ' BROADCAST ' in line:
+ break
+ line_number += 1
+ state_match = re.match(r'^\W+([A-Z]+)', line)
+ if not state_match:
+ rc = 2
+ print('Error: failed to find status in ifconfig output: ' + out)
+ else:
+ rc = 0 if state_match.group(1) == 'UP' else 2
+ print(out)
+except subprocess.CalledProcessError as e:
+ print("Error finding NIC {}: {}\n".format(nic_name, binary2str(e.output)))
+ rc = 2
+
+exit(rc)
diff --git a/app/monitoring/checks/check_ping.py b/app/monitoring/checks/check_ping.py
new file mode 100755
index 0000000..35e7234
--- /dev/null
+++ b/app/monitoring/checks/check_ping.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+import argparse
+import re
+import sys
+import subprocess
+
+from binary_converter import binary2str
+
+
+if len(sys.argv) < 2:
+ raise ValueError('destination address must be specified')
+
+
+def thresholds_string(string):
+ matches = re.match(r'\d+%/\d+([.]\d+)?/\d+([.]\d+)?', string)
+ if not matches:
+ msg = "%r is not a valid thresholds string" % string
+ raise argparse.ArgumentTypeError(msg)
+ return string
+
+
+def get_args():
+ # read the ping check parameters from the command line
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument("-W", "--warning", nargs="?",
+ type=thresholds_string,
+ default='1%/300/600',
+ help="warning thresholds: packet-loss "
+ "(%)/avg-rtt (ms)/max-rtt (ms)"
+ "(example: 1%/300ms/600ms)")
+ parser.add_argument("-C", "--critical", nargs="?",
+ type=thresholds_string,
+ default='10%/1000/2000',
+ help="critical thresholds: packet-loss "
+ "(%)/avg-rtt (ms)/max-rtt (ms)"
+ "(example: 1%/300ms/600ms)")
+ parser.add_argument("-f", "--source", nargs="?", type=str, default='',
+ help="source address")
+ parser.add_argument("-t", "--target", nargs="?", type=str, default='',
+ help="target address")
+ parser.add_argument("-c", "--count", nargs="?", type=int, default=5,
+ help="how many packets will be sent")
+ parser.add_argument("-i", "--interval", nargs="?", type=float, default=0.5,
+ help="seconds between sending each packet")
+ parser.add_argument("-p", "--pattern", nargs="?", type=str,
+ default='OS-DNA', help="pattern to pad packet with")
+ parser.add_argument("-w", "--wait", nargs="?", type=int, default=5,
+ help="seconds to wait for completion of all responses")
+ parser.add_argument("-s", "--packetsize", nargs="?", type=int, default=256,
+ help="size of packet vseconds to wait for completion "
+ "of all responses")
+ return parser.parse_args()
+
+args = get_args()
+
+if not args.target:
+ raise ValueError('target address must be specified')
+
+rc = 0
+
+try:
+ cmd = "ping -c {} -i {} -p {} -w {} -s {} {}{} {}".format(
+ args.count, args.interval,
+ args.pattern, args.wait,
+ args.packetsize,
+ '-I ' if args.source else '',
+ args.source, args.target)
+ out = subprocess.check_output([cmd],
+ stderr=subprocess.STDOUT,
+ shell=True)
+ out = binary2str(out)
+except subprocess.CalledProcessError as e:
+    print("Error doing ping: {}\n".format(binary2str(e.output)))
+    exit(2)
+
+# find packet loss data
+packet_loss_match = re.search(r'(\d+)[%] packet loss', out, re.M)
+if not packet_loss_match:
+ out += '\npacket loss data not found'
+ rc = 2
+
+# find rtt avg/max data
+rtt_results = None
+if rc < 2:
+ regexp = 'rtt min/avg/max/mdev = [0-9.]+/([0-9.]+)/([0-9.]+)/[0-9.]+ ms'
+ rtt_results = re.search(regexp, out, re.M)
+ if not rtt_results:
+ out += '\nrtt results not found'
+ rc = 2
+if rc < 2:
+ packet_loss = int(packet_loss_match.group(1))
+ avg_rtt = float(rtt_results.group(1))
+ max_rtt = float(rtt_results.group(2))
+ thresholds_regexp = r'(\d+)%/(\d+[.0-9]*)/(\d+[.0-9]*)'
+ warn_threshold_match = re.match(thresholds_regexp, args.warning)
+ critical_threshold_match = re.match(thresholds_regexp, args.critical)
+ packet_loss_warn = int(warn_threshold_match.group(1))
+ packet_loss_critical = int(critical_threshold_match.group(1))
+ avg_rtt_warn = float(warn_threshold_match.group(2))
+ avg_rtt_critical = float(critical_threshold_match.group(2))
+ max_rtt_warn = float(warn_threshold_match.group(3))
+ max_rtt_critical = float(critical_threshold_match.group(3))
+ if packet_loss > packet_loss_critical or avg_rtt >= avg_rtt_critical or \
+ max_rtt >= max_rtt_critical:
+ rc = 2
+ elif packet_loss > packet_loss_warn or avg_rtt >= avg_rtt_warn or \
+ max_rtt >= max_rtt_warn:
+ rc = 1
+
+print(out)
+exit(rc)
diff --git a/app/monitoring/checks/check_pnic_vpp.py b/app/monitoring/checks/check_pnic_vpp.py
new file mode 100755
index 0000000..942fdc2
--- /dev/null
+++ b/app/monitoring/checks/check_pnic_vpp.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+"""
+sudo vppctl show hardware-interfaces:
+
+take only the physical interfaces, e.g. "TenGigabitEthernet86/0/0"
+Status: "OK" if "up" is detected in the interface line, CRITICAL otherwise
+
+return full text of "vppctl show hardware-interfaces"
+"""
+
+import re
+import subprocess
+
+from binary_converter import binary2str
+
+
+NAME_RE = '^[a-zA-Z]*GigabitEthernet'
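+# NAME_RE is meant to match physical NIC names as listed by VPP,
+# e.g. "GigabitEthernet0/8/0" or "TenGigabitEthernet86/0/0"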
+
+rc = 0
+
+try:
+ out = subprocess.check_output(["sudo vppctl show hardware-interfaces"],
+ stderr=subprocess.STDOUT,
+ shell=True)
+ out = binary2str(out)
+ lines = out.splitlines()
+ name_re = re.compile(NAME_RE)
+ matching_lines = [l for l in lines if name_re.search(l)]
+ matching_line = matching_lines[0] if matching_lines else None
+ if matching_line:
+ rc = 0 if "up" in matching_line.split() else 2
+ print('output from "vppctl show hardware-interfaces":\n{}'
+ .format(out))
+ else:
+ rc = 2
+ print('Error: failed to find pNic in output of '
+ '"vppctl show hardware-interfaces": {}'
+ .format(out))
+except subprocess.CalledProcessError as e:
+ print("Error running 'vppctl show hardware-interfaces': {}"
+ .format(binary2str(e.output)))
+ rc = 2
+
+exit(rc)
diff --git a/app/monitoring/checks/check_vedge_ovs.py b/app/monitoring/checks/check_vedge_ovs.py
new file mode 100755
index 0000000..849af66
--- /dev/null
+++ b/app/monitoring/checks/check_vedge_ovs.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+"""
+Check OVS vEdge health
+
+Run command:
+ps aux | grep "\(ovs-vswitchd\|ovsdb-server\): monitoring" | grep -v grep
+
+OK if for both ovs-vswitchd AND ovsdb-server processes we see '(healthy)'
+otherwise CRITICAL
+
+return full text output of the command
+"""
+
+import subprocess
+
+from binary_converter import binary2str
+
+
+rc = 0
+cmd = r'ps aux | grep "\(ovs-vswitchd\|ovsdb-server\): monitoring" | ' + \
+      'grep -v grep'
+
+try:
+ out = subprocess.check_output([cmd], stderr=subprocess.STDOUT, shell=True)
+ out = binary2str(out)
+ lines = out.splitlines()
+    matching_lines = [l for l in lines if '(healthy)' in l]
+ rc = 0 if len(matching_lines) == 2 else 2
+ print(out)
+except subprocess.CalledProcessError as e:
+ print("Error finding expected output: {}".format(binary2str(e.output)))
+ rc = 2
+
+exit(rc)
diff --git a/app/monitoring/checks/check_vedge_vpp.py b/app/monitoring/checks/check_vedge_vpp.py
new file mode 100755
index 0000000..346feae
--- /dev/null
+++ b/app/monitoring/checks/check_vedge_vpp.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+"""
+sudo vppctl show runtime:
+
+test 1: was the return value not null?
+test 2: is startup-config-process = done?
+1 and 2 = vedge status ok
+1 and not 2 = vedge status warning
+not 1 = vedge status critical
+
+return full text of "vppctl show runtime"
+"""
+
+import re
+import subprocess
+
+from binary_converter import binary2str
+
+
+rc = 0
+search_pattern = re.compile("^startup-config-process ")
+
+try:
+ out = subprocess.check_output(["sudo vppctl show runtime"],
+ stderr=subprocess.STDOUT,
+ shell=True)
+ out = binary2str(out)
+ lines = out.splitlines()
+ matching_lines = [l for l in lines if search_pattern.match(l)]
+ matching_line = matching_lines[0] if matching_lines else None
+ if matching_line and "done" in matching_line.split():
+ print(out)
+ else:
+ rc = 1
+        print('Warning: startup-config-process not done in '
+              '"vppctl show runtime" output: ' + out)
+except subprocess.CalledProcessError as e:
+ print("Error finding 'vppctl show runtime': {}"
+ .format(binary2str(e.output)))
+ rc = 2
+
+exit(rc)
diff --git a/app/monitoring/checks/check_vnic_vconnector.py b/app/monitoring/checks/check_vnic_vconnector.py
new file mode 100755
index 0000000..b0f96cd
--- /dev/null
+++ b/app/monitoring/checks/check_vnic_vconnector.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+# find status of vnic-vconnector link
+# vconnector object name defines name of bridge
+# use "brctl showmacs <bridge>", then look for the MAC address
+
+import re
+import sys
+import subprocess
+
+from binary_converter import binary2str
+
+
+if len(sys.argv) < 3:
+ print('usage: ' + sys.argv[0] + ' <bridge> <mac_address>')
+ exit(2)
+bridge_name = str(sys.argv[1])
+mac_address = str(sys.argv[2])
+
+rc = 0
+
+try:
+ out = subprocess.check_output(["brctl showmacs " + bridge_name],
+ stderr=subprocess.STDOUT,
+ shell=True)
+ out = binary2str(out)
+ lines = out.splitlines()
+ line_number = 1
+ line = ''
+ found = False
+ while line_number < len(lines):
+ line = lines[line_number]
+ if mac_address in line:
+ found = True
+ break
+ line_number += 1
+ if not found:
+ rc = 2
+ print('Error: failed to find MAC {}:\n{}\n'
+ .format(mac_address, out))
+ else:
+ # grab "is local?" and "ageing timer" values
+ line_parts = line.split() # port, mac address, is local?, ageing timer
+ is_local = line_parts[2]
+ ageing_timer = line_parts[3]
+        msg_format =\
+            'vConnector bridge name: {}\n'\
+            'vNIC MAC address: {}\n'\
+            'is local: {}\n'\
+            'ageing timer: {}\n'\
+            'command: brctl showmacs {}\n'\
+            'output:\n{}'
+        msg = msg_format.format(bridge_name, mac_address, is_local,
+                                ageing_timer, bridge_name, out)
+ print(msg)
+except subprocess.CalledProcessError as e:
+ print("Error finding MAC {}: {}\n"
+ .format(mac_address, binary2str(e.output)))
+ rc = 2
+
+exit(rc)
diff --git a/app/monitoring/checks/check_vnic_vpp.py b/app/monitoring/checks/check_vnic_vpp.py
new file mode 100755
index 0000000..0f77ddd
--- /dev/null
+++ b/app/monitoring/checks/check_vnic_vpp.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+"""
+sudo vppctl show hardware-interfaces:
+
+take only the virtual interfaces, e.g. "VirtualEthernet0/0/0"
+Status: "OK" if "up" is detected in the interface line, CRITICAL otherwise
+
+return full text of "vppctl show hardware-interfaces"
+"""
+
+import re
+import subprocess
+
+from binary_converter import binary2str
+
+rc = 0
+search_pattern = re.compile("^Virtual")
+
+try:
+ out = subprocess.check_output(["sudo vppctl show hardware-interfaces"],
+ stderr=subprocess.STDOUT,
+ shell=True)
+ out = binary2str(out)
+ lines = out.splitlines()
+ matching_lines = [l for l in lines if search_pattern.match(l)]
+ matching_line = matching_lines[0] if matching_lines else None
+ if matching_line and "up" in matching_line.split():
+ print('output of "vppctl show hardware-interfaces":\n{}'
+ .format(out))
+ else:
+ rc = 2
+ print('Error: failed to find status in output of '
+ '"vppctl show hardware-interfaces": {}'.format(out))
+except subprocess.CalledProcessError as e:
+ print("Error finding 'vppctl show hardware-interfaces': {}"
+ .format(binary2str(e.output)))
+ rc = 2
+
+exit(rc)
diff --git a/app/monitoring/checks/check_vservice.py b/app/monitoring/checks/check_vservice.py
new file mode 100644
index 0000000..a95a46a
--- /dev/null
+++ b/app/monitoring/checks/check_vservice.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+"""
+for vservice with type T and id X
+run on the corresponding host:
+ip netns pid X
+the response is one or more PIDs, for example:
+32075
+
+For DHCP there are multiple PIDs; we take the dnsmasq process
+
+then run:
+ps -uf -p 32075
+
+get STAT - "S" and "R" = OK
+"""
+
+import subprocess
+import sys
+
+from binary_converter import binary2str
+
+
+rc = 0
+
+args = sys.argv
+if len(args) < 3:
+ print('usage: check_vservice.py <vService type> <vService ID>')
+ exit(2)
+
+vservice_type = args[1]
+vservice_id = args[2]
+netns_cmd = 'sudo ip netns pid {}'.format(vservice_id)
+pid = ''
+ps_cmd = ''
+try:
+ out = subprocess.check_output([netns_cmd], stderr=subprocess.STDOUT,
+ shell=True)
+ out = binary2str(out)
+ lines = out.splitlines()
+ if not lines:
+ print('no matching vservice: {}\ncommand: {}\noutput: {}'
+ .format(vservice_id, netns_cmd, out))
+ exit(2)
+ pid = lines[0]
+except subprocess.CalledProcessError as e:
+ print("Error running '{}': {}"
+ .format(netns_cmd, binary2str(e.output)))
+ exit(2)
+try:
+ ps_cmd = 'ps -uf -p {}'.format(pid)
+ out = subprocess.check_output([ps_cmd], stderr=subprocess.STDOUT,
+ shell=True)
+ ps_out = binary2str(out)
+ lines = ps_out.splitlines()
+ if not lines:
+        print('no matching vservice: {}\noutput of {}:\n{}'
+              .format(vservice_id, ps_cmd, ps_out))
+ exit(2)
+ headers = lines[0].split()
+ lines = lines[1:]
+ if vservice_type == 'dhcp' and len(lines) > 1:
+ lines = [line for line in lines if 'dnsmasq' in line]
+ values = lines[0].split()
+ stat_index = headers.index('STAT')
+ status = values[stat_index]
+ rc = 0 if status in ['S', 'R'] else 2
+ print('{}\n{}\n{}'.format(netns_cmd, ps_cmd, ps_out))
+except subprocess.CalledProcessError as e:
+ print("Error running '{}': {}".format(ps_cmd, binary2str(e.output)))
+ rc = 2
+
+exit(rc)
diff --git a/app/monitoring/handlers/__init__.py b/app/monitoring/handlers/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/monitoring/handlers/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/monitoring/handlers/basic_check_handler.py b/app/monitoring/handlers/basic_check_handler.py
new file mode 100644
index 0000000..7c945e8
--- /dev/null
+++ b/app/monitoring/handlers/basic_check_handler.py
@@ -0,0 +1,25 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# basic handling of monitoring check results
+
+from monitoring.handlers.monitoring_check_handler import MonitoringCheckHandler
+
+
+class BasicCheckHandler(MonitoringCheckHandler):
+
+ def __init__(self, args):
+ super().__init__(args)
+
+ def handle(self, id, check_result):
+ doc = self.doc_by_id(id)
+ if not doc:
+ return 1
+ self.keep_result(doc, check_result)
+ return check_result['status']
diff --git a/app/monitoring/handlers/handle_link.py b/app/monitoring/handlers/handle_link.py
new file mode 100644
index 0000000..26f4d12
--- /dev/null
+++ b/app/monitoring/handlers/handle_link.py
@@ -0,0 +1,36 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# handle monitoring event for links
+
+from monitoring.handlers.monitoring_check_handler import MonitoringCheckHandler
+
+
+class HandleLink(MonitoringCheckHandler):
+
+ def __init__(self, args):
+ super().__init__(args)
+
+ def handle(self, link_id_from_check, check_result):
+ # link ID from check is formatted like this:
+ # <link type>_<source_id>_<target_id>
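+        # e.g. "vnic-vconnector_<source object id>_<target object id>"
+        # (cf. the link values prepared in monitoring_link_vnic_vconnector.py)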
+ link_type = link_id_from_check[:link_id_from_check.index('_')]
+ remainder = link_id_from_check[len(link_type)+1:]
+ source_id = remainder[:remainder.index('_')]
+ target_id = remainder[len(source_id)+1:]
+ search = {
+ 'link_type': link_type,
+ 'source_id': source_id,
+ 'target_id': target_id
+ }
+ doc = self.inv.find_items(search, collection='links', get_single=True)
+ if not doc:
+ return 1
+ self.keep_result(doc, check_result)
+ return check_result['status']
diff --git a/app/monitoring/handlers/handle_otep.py b/app/monitoring/handlers/handle_otep.py
new file mode 100644
index 0000000..0189625
--- /dev/null
+++ b/app/monitoring/handlers/handle_otep.py
@@ -0,0 +1,48 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# handle monitoring event for OTEP objects
+
+from monitoring.handlers.monitoring_check_handler import MonitoringCheckHandler
+
+
+class HandleOtep(MonitoringCheckHandler):
+
+ def __init__(self, args):
+ super().__init__(args)
+
+ def handle(self, id, check_result):
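+        # the check ID is expected in the format "<OTEP object ID>_<port ID>"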
+ object_id = id[:id.index('_')]
+ port_id = id[id.index('_')+1:]
+ doc = self.doc_by_id(object_id)
+ if not doc:
+ return 1
+ ports = doc['ports']
+        port = ports.get(port_id)
+ if not port:
+ self.log.error('Port not found: ' + port_id)
+ return 1
+ status = check_result['status']
+ port['status'] = self.STATUS_LABEL[status]
+ port['status_value'] = status
+ port['status_text'] = check_result['output']
+
+ # set object status based on overall state of ports
+ status_list = [p['status'] for p in ports.values() if 'status' in p]
+ # OTEP overall status:
+ # - Critical if no port is OK
+ # - Warning if some ports not OK
+ # - otherwise OK
+ status = \
+ 2 if 'OK' not in status_list \
+ else 1 if 'Critical' in status_list or 'Warning' in status_list \
+ else 0
+ self.set_doc_status(doc, status, None, self.check_ts(check_result))
+ self.keep_message(doc, check_result)
+ return status
diff --git a/app/monitoring/handlers/handle_pnic.py b/app/monitoring/handlers/handle_pnic.py
new file mode 100644
index 0000000..934bb16
--- /dev/null
+++ b/app/monitoring/handlers/handle_pnic.py
@@ -0,0 +1,29 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# handle monitoring event for pNIC objects
+
+from monitoring.handlers.monitoring_check_handler import MonitoringCheckHandler
+
+
+class HandlePnic(MonitoringCheckHandler):
+
+ def __init__(self, args):
+ super().__init__(args)
+
+ def handle(self, id, check_result):
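+        # the check ID is expected in the format
+        # "<object ID prefix>-<MAC address without separators>";
+        # the colons are restored below to rebuild the pNIC object ID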
+ object_id = id[:id.index('-')]
+ mac = id[id.index('-')+1:]
+ mac_address = '%s:%s:%s:%s:%s:%s' % \
+ (mac[0:2], mac[2:4], mac[4:6], mac[6:8], mac[8:10], mac[10:12])
+ object_id += '-' + mac_address
+ doc = self.doc_by_id(object_id)
+ if not doc:
+ return 1
+ self.keep_result(doc, check_result)
+ return check_result['status']
diff --git a/app/monitoring/handlers/handle_pnic_vpp.py b/app/monitoring/handlers/handle_pnic_vpp.py
new file mode 100644
index 0000000..47a76e5
--- /dev/null
+++ b/app/monitoring/handlers/handle_pnic_vpp.py
@@ -0,0 +1,28 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# handle monitoring event for VPP pNIC objects
+
+from monitoring.handlers.monitoring_check_handler import MonitoringCheckHandler
+
+
+class HandlePnicVpp(MonitoringCheckHandler):
+
+ def __init__(self, args):
+ super().__init__(args)
+
+ def handle(self, id, check_result):
+ id = self.decode_special_characters(id)
+ pnic = self.doc_by_id(id)
+ if not pnic:
+ return 1
+ self.keep_result(pnic, check_result)
+        # in the vEdge object, under the corresponding port name, set the
+        # attributes "status", "status_timestamp", "status_text"
+ return check_result['status']
diff --git a/app/monitoring/handlers/handle_vnic_vpp.py b/app/monitoring/handlers/handle_vnic_vpp.py
new file mode 100644
index 0000000..c7d234d
--- /dev/null
+++ b/app/monitoring/handlers/handle_vnic_vpp.py
@@ -0,0 +1,28 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# handle monitoring event for VPP vNIC objects
+
+from monitoring.handlers.monitoring_check_handler import MonitoringCheckHandler
+
+
+class HandleVnicVpp(MonitoringCheckHandler):
+
+ def __init__(self, args):
+ super().__init__(args)
+
+ def handle(self, id, check_result):
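+        # the check ID is expected in the format "<vNIC type>_<encoded ID>",
+        # where the type prefix is "instance_vnic" or "vservice_vnic"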
+ is_instance_vnic = id.startswith('instance_vnic')
+ vnic_type = 'instance_vnic' if is_instance_vnic else 'vservice_vnic'
+ id = self.decode_special_characters(id[len(vnic_type)+1:])
+ doc = self.doc_by_id(id)
+ if not doc:
+ return 1
+ self.keep_result(doc, check_result)
+ return check_result['status']
diff --git a/app/monitoring/handlers/monitor.py b/app/monitoring/handlers/monitor.py
new file mode 100755
index 0000000..e147a7d
--- /dev/null
+++ b/app/monitoring/handlers/monitor.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+# handle monitoring events
+
+import argparse
+import json
+import sys
+
+from utils.mongo_access import MongoAccess
+from utils.util import ClassResolver
+
+DEFAULTS = {
+ 'env': 'WebEX-Mirantis@Cisco',
+ 'inventory': 'inventory',
+ 'loglevel': 'WARNING'
+}
+
+
+def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
+ default="",
+ help="name of config file with MongoDB server " +
+ "access details")
+ parser.add_argument("-e", "--env", nargs="?", type=str,
+ default=DEFAULTS['env'],
+ help="name of environment to scan \n" +
+ "(default: {})".format(DEFAULTS['env']))
+ parser.add_argument("-y", "--inventory", nargs="?", type=str,
+ default=DEFAULTS['inventory'],
+ help="name of inventory collection \n" +
+ "(default: {}".format(DEFAULTS['inventory']))
+ parser.add_argument('-i', '--inputfile', nargs='?', type=str,
+ default='',
+ help="read input from the specifed file \n" +
+ "(default: from stdin)")
+ parser.add_argument("-l", "--loglevel", nargs="?", type=str,
+ default=DEFAULTS["loglevel"],
+ help="logging level \n(default: '{}')"
+ .format(DEFAULTS["loglevel"]))
+ args = parser.parse_args()
+ return args
+
+input = None
+args = get_args()
+MongoAccess.set_config_file(args.mongo_config)
+if args.inputfile:
+ try:
+ with open(args.inputfile, 'r') as input_file:
+ input = input_file.read()
+    except Exception:
+        raise FileNotFoundError("failed to open input file: " + args.inputfile)
+else:
+ input = sys.stdin.read()
+ if not input:
+        raise ValueError("No input provided on stdin")
+
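+# the input is expected to be a Sensu event in JSON form; the fields used
+# below are, schematically (hypothetical example):
+#   {"id": "...", "client": {"environment": "..."},
+#    "check": {"name": "<object type>_<object id>", "status": 0|1|2,
+#              "output": "...", "executed": <unix timestamp>}}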
+check_result_full = json.loads(input)
+check_client = check_result_full['client']
+check_result = check_result_full['check']
+check_result['id'] = check_result_full['id']
+name = check_result['name']
+status = check_result['status']
+object_type = name[:name.index('_')]
+object_id = name[name.index('_')+1:]
+if 'environment' in check_client:
+ args.env = check_client['environment']
+
+handler = None
+basic_handling_types = ['vedge', 'vservice']
+if object_type in basic_handling_types:
+ from monitoring.handlers.basic_check_handler import BasicCheckHandler
+ handler = BasicCheckHandler(args)
+else:
+ module_name = 'handle_' + object_type
+ handler = ClassResolver.get_instance_single_arg(args,
+ module_name=module_name,
+ package_name='monitoring.handlers')
+if handler:
+ handler.handle(object_id, check_result)
diff --git a/app/monitoring/handlers/monitoring_check_handler.py b/app/monitoring/handlers/monitoring_check_handler.py
new file mode 100644
index 0000000..51769ab
--- /dev/null
+++ b/app/monitoring/handlers/monitoring_check_handler.py
@@ -0,0 +1,94 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# handle monitoring event
+import datetime
+import sys
+from time import gmtime, strftime
+
+from bson import ObjectId
+
+from discover.configuration import Configuration
+from messages.message import Message
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+from utils.special_char_converter import SpecialCharConverter
+from utils.string_utils import stringify_datetime
+
+TIME_FORMAT = '%Y-%m-%d %H:%M:%S %Z'
+SOURCE_SYSTEM = 'Sensu'
+ERROR_LEVEL = ['info', 'warn', 'error']
+
+
+class MonitoringCheckHandler(SpecialCharConverter):
+ STATUS_LABEL = ['OK', 'Warning', 'Critical']
+
+ def __init__(self, args):
+ super().__init__()
+ self.log = FullLogger()
+ self.log.set_loglevel(args.loglevel)
+ self.env = args.env
+ try:
+ self.conf = Configuration(args.mongo_config)
+ self.inv = InventoryMgr()
+ self.inv.log.set_loglevel(args.loglevel)
+ self.inv.set_collections(args.inventory)
+ except FileNotFoundError:
+ sys.exit(1)
+
+ def doc_by_id(self, object_id):
+ doc = self.inv.get_by_id(self.env, object_id)
+ if not doc:
+ self.log.warn('No matching object found with ID: ' + object_id)
+ return doc
+
+ def doc_by_db_id(self, db_id, coll_name=None):
+ coll = self.inv.collections[coll_name] if coll_name else None
+ doc = self.inv.find({'_id': ObjectId(db_id)},
+ get_single=True, collection=coll)
+ if not doc:
+ self.log.warn('No matching object found with DB ID: ' + db_id)
+ return doc
+
+ def set_doc_status(self, doc, status, status_text, timestamp):
+ doc['status'] = self.STATUS_LABEL[status] if isinstance(status, int) \
+ else status
+ if status_text:
+ doc['status_text'] = status_text
+ doc['status_timestamp'] = strftime(TIME_FORMAT, timestamp)
+ if 'link_type' in doc:
+ self.inv.write_link(doc)
+ else:
+ self.inv.set(doc)
+
+ @staticmethod
+ def check_ts(check_result):
+ return gmtime(check_result['executed'])
+
+ def keep_result(self, doc, check_result):
+ status = check_result['status']
+ ts = self.check_ts(check_result)
+ self.set_doc_status(doc, status, check_result['output'], ts)
+ self.keep_message(doc, check_result)
+
+ def keep_message(self, doc, check_result, error_level=None):
+ msg_id = check_result['id']
+ obj_id = doc['id']
+ display_context = doc['network_id'] if doc['type'] == 'port'\
+ else doc['id']
+ level = error_level if error_level\
+ else ERROR_LEVEL[check_result['status']]
+ dt = datetime.datetime.utcfromtimestamp(check_result['executed'])
+ ts = stringify_datetime(dt)
+ message = Message(msg_id=msg_id, env=self.env, source=SOURCE_SYSTEM,
+ object_id=obj_id, object_type=doc['type'],
+ display_context=display_context, level=level,
+ msg=check_result, ts=ts)
+ collection = self.inv.collections['messages']
+ collection.insert_one(message.get())
diff --git a/app/monitoring/setup/__init__.py b/app/monitoring/setup/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/monitoring/setup/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/monitoring/setup/monitoring_check_handler.py b/app/monitoring/setup/monitoring_check_handler.py
new file mode 100644
index 0000000..1c9a013
--- /dev/null
+++ b/app/monitoring/setup/monitoring_check_handler.py
@@ -0,0 +1,54 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from monitoring.setup.monitoring_handler import MonitoringHandler
+from utils.inventory_mgr import InventoryMgr
+from utils.special_char_converter import SpecialCharConverter
+
+
+class MonitoringCheckHandler(MonitoringHandler, SpecialCharConverter):
+
+ def __init__(self, env):
+ super().__init__(env)
+
+ # add monitoring setup on remote host for given object
+ def create_monitoring_for_object(self, o, values):
+ self.replacements.update(self.env_monitoring_config)
+ self.replacements.update(values)
+ if 'host' in o:
+ host = self.inv.get_by_id(self.env, o['host'])
+ if host and 'ip_address' in host:
+ self.replacements['client_ip'] = host['ip_address']
+ type_str = o['type'] if 'type' in o else 'link_' + o['link_type']
+ file_type = 'client_check_' + type_str + '.json'
+ host = o['host']
+ sub_dir = '/host/' + host
+ content = self.prepare_config_file(
+ file_type,
+ {'side': 'client', 'type': file_type})
+ # need to put this content inside client.json file
+ client_file = 'client.json'
+ host = o['host']
+ client_file_content = self.get_config_from_db(host, client_file)
+ # merge checks attribute from current content into client.json
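+        # the merged client.json content ends up shaped roughly like:
+        #   {"config": {"checks": {"<check name>": {...}, ...}}}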
+ checks = client_file_content['config']['checks'] \
+ if (client_file_content and
+ 'checks' in client_file_content['config']) \
+ else {}
+ checks.update(content.get('config', {}).get('checks', {}))
+ if client_file_content:
+ client_file_content['config']['checks'] = checks
+ else:
+ client_file_content = {
+ 'config': {
+ 'checks': checks
+ }
+ }
+ content = client_file_content
+ self.write_config_file(client_file, sub_dir, host, content)
diff --git a/app/monitoring/setup/monitoring_handler.py b/app/monitoring/setup/monitoring_handler.py
new file mode 100644
index 0000000..5b7cae0
--- /dev/null
+++ b/app/monitoring/setup/monitoring_handler.py
@@ -0,0 +1,485 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# handle specific setup of monitoring
+
+import os
+import json
+import subprocess
+from socket import timeout
+
+import copy
+import pymongo
+import shutil
+import stat
+from boltons.iterutils import remap
+
+from discover.configuration import Configuration
+from discover.fetchers.cli.cli_access import CliAccess
+from utils.binary_converter import BinaryConverter
+from utils.deep_merge import remerge
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+from utils.mongo_access import MongoAccess
+from utils.ssh_conn import SshConn
+from utils.ssh_connection import SshConnection
+
+
+class MonitoringHandler(MongoAccess, CliAccess, BinaryConverter):
+ PRODUCTION_CONFIG_DIR = '/etc/sensu/conf.d'
+ APP_SCRIPTS_FOLDER = 'monitoring/checks'
+ REMOTE_SCRIPTS_FOLDER = '/etc/sensu/plugins'
+ TMP_SSL_FOLDER = '/tmp/monitoring_ssl_files'
+
+ provision_levels = {
+ 'none': 0,
+ 'db': 1,
+ 'files': 2,
+ 'deploy': 3
+ }
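+    # provisioning levels are cumulative:
+    #   'db'     - only keep the generated config in the database
+    #   'files'  - also write the config files locally
+    #   'deploy' - also copy them to the target hosts and restart services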
+
+ pending_changes = {}
+
+ fetch_ssl_files = []
+
+ def __init__(self, env):
+ super().__init__()
+ self.log = FullLogger()
+ self.configuration = Configuration()
+ self.mechanism_drivers = \
+ self.configuration.environment['mechanism_drivers']
+ self.env = env
+ self.monitoring_config = self.db.monitoring_config_templates
+ try:
+ self.env_monitoring_config = self.configuration.get('Monitoring')
+ except IndexError:
+ self.env_monitoring_config = {}
+ self.local_host = self.env_monitoring_config.get('server_ip', '')
+ self.scripts_prepared_for_host = {}
+ self.replacements = self.env_monitoring_config
+ self.inv = InventoryMgr()
+ self.config_db = self.db[self.inv.get_coll_name('monitoring_config')]
+ self.provision = self.provision_levels['none']
+ if self.env_monitoring_config:
+ provision = self.env_monitoring_config.get('provision', 'none')
+ provision = str.lower(provision)
+ self.provision =\
+ self.provision_levels.get(provision,
+ self.provision_levels['none'])
+
+ # create a directory if it does not exist
+ @staticmethod
+ def make_directory(directory):
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+ return directory
+
+ def get_config_dir(self, sub_dir=''):
+ config_folder = self.env_monitoring_config['config_folder'] + \
+ (os.sep + sub_dir if sub_dir else '')
+ return self.make_directory(config_folder).rstrip(os.sep)
+
+ def prepare_config_file(self, file_type, base_condition):
+ condition = base_condition
+ condition['type'] = file_type
+ sort = [('order', pymongo.ASCENDING)]
+ docs = self.monitoring_config.find(condition, sort=sort)
+ content = {}
+ for doc in docs:
+ if not self.check_env_condition(doc):
+ return {}
+ content.update(doc)
+ self.replacements['app_path'] = \
+ self.configuration.environment['app_path']
+ config = self.content_replace({'config': content.get('config', {})})
+ return config
+
+ def check_env_condition(self, doc):
+ if 'condition' not in doc:
+ return True
+ condition = doc['condition']
+ if 'mechanism_drivers' not in condition:
+ return True
+ required_mechanism_drivers = condition['mechanism_drivers']
+ if not isinstance(required_mechanism_drivers, list):
+ required_mechanism_drivers = [required_mechanism_drivers]
+ intersection = [val for val in required_mechanism_drivers
+ if val in self.mechanism_drivers]
+ return bool(intersection)
+
+ def content_replace(self, content):
+ content_remapped = remap(content, visit=self.fill_values)
+ return content_remapped
+
+ def format_string(self, val):
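+        # e.g. "{server_ip}:4567" -> "10.0.0.1:4567" (hypothetical value),
+        # taking the values from self.replacements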
+ formatted = val if not isinstance(val, str) or '{' not in val \
+ else val.format_map(self.replacements)
+ return formatted
+
+ def fill_values(self, path, key, value):
+ if not path:
+ return key, value
+ key_formatted = self.format_string(key)
+ value_formatted = self.format_string(value)
+ return key_formatted, value_formatted
+
+ def get_config_from_db(self, host, file_type):
+ find_tuple = {
+ 'environment': self.env,
+ 'host': host,
+ 'type': file_type
+ }
+ doc = self.config_db.find_one(find_tuple)
+ if not doc:
+ return {}
+ doc.pop("_id", None)
+ return self.decode_mongo_keys(doc)
+
+ def write_config_to_db(self, host, config, file_type):
+ find_tuple = {
+ 'environment': self.env,
+ 'host': host,
+ 'type': file_type
+ }
+ doc = copy.copy(find_tuple)
+ doc['config'] = config
+ doc = self.encode_mongo_keys(doc)
+ if not doc:
+ return {}
+ self.config_db.update_one(find_tuple, {'$set': doc}, upsert=True)
+
+ def merge_config(self, host, file_type, content):
+ """
+ merge current monitoring config of host
+ with newer content.
+ return the merged config
+ """
+ doc = self.get_config_from_db(host, file_type)
+ config = remerge([doc['config'], content.get('config')]) if doc \
+ else content.get('config', {})
+ self.write_config_to_db(host, config, file_type)
+ return config
+
+ def write_config_file(self, file_name, sub_dir, host, content,
+ is_container=False, is_server=False):
+ """
+ apply environment definitions to the config,
+ e.g. replace {server_ip} with the IP or host name for the server
+ """
+ # save the config to DB first, and while doing that
+ # merge it with any existing config on same host
+ content = self.merge_config(host, file_name, content)
+
+ if self.provision == self.provision_levels['db']:
+ self.log.debug('Monitoring setup kept only in DB')
+ return
+ # now dump the config to the file
+ content_json = json.dumps(content.get('config', content),
+ sort_keys=True, indent=4)
+ content_json += '\n'
+ # always write the file locally first
+ local_dir = self.make_directory(os.path.join(self.get_config_dir(),
+ sub_dir.strip(os.path.sep)))
+ local_path = os.path.join(local_dir, file_name)
+ self.write_to_local_host(local_path, content_json)
+ self.track_setup_changes(host, is_container, file_name, local_path,
+ sub_dir, is_server=is_server)
+
+ def add_changes_for_all_clients(self):
+ """
+ to debug deployment, add simulated track changes entries.
+ no need to add for server, as these are done by server_setup()
+ """
+ docs = self.config_db.find({'environment': self.env})
+ for doc in docs:
+ host = doc['host']
+ sub_dir = os.path.join('host', host)
+ file_name = doc['type']
+ config_folder = self.env_monitoring_config['config_folder']
+ local_path = os.path.join(config_folder, sub_dir, file_name)
+ if host == self.env_monitoring_config['server_ip']:
+ continue
+ self.track_setup_changes(host, False, file_name, local_path,
+ sub_dir)
+
+ def get_ssh(self, host, is_container=False, for_sftp=False):
+ ssh = SshConnection.get_ssh(host, for_sftp)
+ if not ssh:
+ if is_container:
+ conf = self.env_monitoring_config
+ host = conf['server_ip']
+ port = int(conf['ssh_port'])
+ user = conf['ssh_user']
+ pwd = conf['ssh_password']
+ ssh = SshConnection(host, user, _pwd=pwd, _port=port,
+ for_sftp=for_sftp)
+ else:
+ ssh = SshConn(host, for_sftp=for_sftp)
+ return ssh
+
+ def track_setup_changes(self, host=None, is_container=False, file_name=None,
+ local_path=None, sub_dir=None,
+ is_server=False,
+ target_mode=None,
+ target_path=PRODUCTION_CONFIG_DIR):
+ if host not in self.pending_changes:
+ self.pending_changes[host] = {}
+ if file_name not in self.pending_changes[host]:
+ self.pending_changes[host][file_name] = {
+ "host": host,
+ "is_container": is_container,
+ "is_server": is_server,
+ "file_name": file_name,
+ "local_path": local_path,
+ "sub_dir": sub_dir,
+ "target_path": target_path,
+ "target_mode": target_mode
+ }
+
+ def handle_pending_setup_changes(self):
+ if self.provision < self.provision_levels['files']:
+ if self.provision == self.provision_levels['db']:
+ self.log.info('Monitoring config applied only in DB')
+ return
+ self.log.info('applying monitoring setup')
+ hosts = {}
+ scripts_to_hosts = {}
+ for host, host_changes in self.pending_changes.items():
+ self.handle_pending_host_setup_changes(host_changes, hosts,
+ scripts_to_hosts)
+ if self.provision < self.provision_levels['deploy']:
+ return
+ if self.fetch_ssl_files:
+ self.deploy_ssl_files(list(scripts_to_hosts.keys()))
+ for host in scripts_to_hosts.values():
+ self.deploy_scripts_to_host(host)
+ for host in hosts.values():
+ self.deploy_config_to_target(host)
+ self.log.info('done applying monitoring setup')
+
+ def handle_pending_host_setup_changes(self, host_changes, hosts,
+ scripts_to_hosts):
+ if self.provision < self.provision_levels['deploy']:
+ self.log.info('Monitoring config not deployed to remote host')
+ for file_type, changes in host_changes.items():
+ host = changes['host']
+ is_container = changes['is_container']
+ is_server = changes['is_server']
+ local_dir = changes['local_path']
+ if local_dir == "scripts":
+ scripts_to_hosts[host] = {'host': host, 'is_server': is_server}
+ continue
+ self.log.debug('applying monitoring setup changes ' +
+ 'for host ' + host + ', file type: ' + file_type)
+ is_local_host = host == self.local_host
+ file_path = os.path.join(self.PRODUCTION_CONFIG_DIR, file_type)
+ if not is_server and host not in hosts:
+ hosts[host] = {
+ 'host': host,
+ 'local_dir': local_dir,
+ 'is_local_host': is_local_host,
+ 'is_container': is_container,
+ 'is_server': is_server
+ }
+ if is_server:
+ remote_path = self.PRODUCTION_CONFIG_DIR
+ if os.path.isfile(local_dir):
+ remote_path += os.path.sep + os.path.basename(local_dir)
+ self.write_to_server(local_dir,
+ remote_path=remote_path,
+ is_container=is_container)
+ elif is_local_host:
+ # write to production configuration directory on local host
+ self.make_directory(self.PRODUCTION_CONFIG_DIR)
+ shutil.copy(changes['local_path'], file_path)
+ else:
+ # write to remote host prepare dir - use sftp
+ if self.provision < self.provision_levels['deploy']:
+ continue
+ self.write_to_remote_host(host, changes['local_path'])
+
+ def prepare_scripts(self, host, is_server):
+ if self.scripts_prepared_for_host.get(host, False):
+ return
+ gateway_host = SshConn.get_gateway_host(host)
+ # copy scripts to host
+ scripts_dir = os.path.join(self.env_monitoring_config['app_path'],
+ self.APP_SCRIPTS_FOLDER)
+ script_files = [f for f in os.listdir(scripts_dir)
+ if os.path.isfile(os.path.join(scripts_dir, f))]
+ script_mode = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | \
+ stat.S_IROTH | stat.S_IXOTH
+ target_host = host if is_server else gateway_host
+ self.make_remote_dir(target_host, self.REMOTE_SCRIPTS_FOLDER)
+ for file_name in script_files:
+ remote_path = os.path.join(self.REMOTE_SCRIPTS_FOLDER, file_name)
+ local_path = os.path.join(scripts_dir, file_name)
+ if not os.path.isfile(local_path):
+ continue
+ if is_server:
+ ssh = self.get_ssh(target_host, for_sftp=True)
+ ssh.copy_file(local_path, remote_path, mode=script_mode)
+ else:
+ self.copy_to_remote_host(target_host, local_path, remote_path,
+ mode=script_mode,
+ make_remote_dir=False)
+ self.scripts_prepared_for_host[host] = True
+
+ def deploy_ssl_files(self, hosts: list):
+ monitoring_server = self.env_monitoring_config['server_ip']
+ gateway_host = SshConn.get_gateway_host(hosts[0])
+ self.make_directory(self.TMP_SSL_FOLDER)
+ for file_path in self.fetch_ssl_files:
+ # copy SSL files from the monitoring server
+ file_name = os.path.basename(file_path)
+ local_path = os.path.join(self.TMP_SSL_FOLDER, file_name)
+ self.get_file(monitoring_server, file_path, local_path)
+ # first copy the files to the gateway
+ self.write_to_remote_host(gateway_host, local_path,
+ remote_path=file_path)
+ ssl_path = os.path.commonprefix(self.fetch_ssl_files)
+ for host in hosts:
+ self.copy_from_gateway_to_host(host, ssl_path, ssl_path)
+ # remove files from temporary folder
+ for file_path in self.fetch_ssl_files:
+ tmp_path = os.path.join(self.TMP_SSL_FOLDER,
+ os.path.basename(file_path))
+ if os.path.exists(tmp_path):
+                os.remove(tmp_path)
+
+ def deploy_scripts_to_host(self, host_details):
+ host = host_details['host']
+ is_server = host_details['is_server']
+ self.prepare_scripts(host, is_server)
+ remote_path = self.REMOTE_SCRIPTS_FOLDER
+ local_path = remote_path + os.path.sep + '*.py'
+ if is_server:
+ return # this was done earlier
+ self.copy_from_gateway_to_host(host, local_path, remote_path)
+
+ def restart_service(self, host: str = None,
+ service: str = 'sensu-client',
+ is_server: bool = False,
+                        msg: str = None):
+ ssh = self.get_ssh(host)
+ cmd = 'sudo /etc/init.d/{} restart'.format(service)
+ log_msg = msg if msg else 'deploying config to host {}'.format(host)
+ self.log.info(log_msg)
+ if is_server:
+ ssh.exec(cmd)
+ else:
+ self.run(cmd, ssh_to_host=host, ssh=ssh)
+
+ def deploy_config_to_target(self, host_details):
+ host = host_details['host']
+ is_local_host = host_details['is_local_host']
+ is_container = host_details['is_container']
+ is_server = host_details['is_server']
+ local_dir = host_details['local_dir']
+ if is_container or is_server or not is_local_host:
+ local_dir = os.path.dirname(local_dir)
+ if not is_server:
+ self.move_setup_files_to_remote_host(host, local_dir)
+ # restart the Sensu client on the remote host,
+ # so it takes the new setup
+ self.restart_service(host)
+
+ def run_cmd_locally(self, cmd):
+ try:
+            subprocess.Popen(cmd.split(),
+ shell=True,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ except subprocess.CalledProcessError as e:
+ print("Error running command: " + cmd +
+ ", output: " + self.binary2str(e.output) + "\n")
+
+ def move_setup_files_to_remote_host(self, host, local_dir):
+ if self.provision < self.provision_levels['deploy']:
+ self.log.info('Monitoring config not written to remote host')
+ return
+ # need to scp the files from the gateway host to the target host
+ remote_path = self.PRODUCTION_CONFIG_DIR
+ self.copy_from_gateway_to_host(host, local_dir, remote_path)
+
+ def copy_from_gateway_to_host(self, host, local_dir, remote_path):
+ ssh = self.get_ssh(host)
+ what_to_copy = local_dir if '*' in local_dir else local_dir + '/*'
+ if ssh.is_gateway_host(host):
+ # on gateway host, perform a simple copy
+ # make sure the source and destination are not the same
+ local_dir_base = local_dir[:local_dir.rindex('/*')] \
+ if '/*' in local_dir else local_dir
+ if local_dir_base.strip('/*') == remote_path.strip('/*'):
+ return # same directory - nothing to do
+ cmd = 'cp {} {}'.format(what_to_copy, remote_path)
+ self.run(cmd, ssh=ssh)
+ return
+ self.make_remote_dir(host, remote_path)
+ remote_path = ssh.get_user() + '@' + host + ':' + \
+ remote_path + os.sep
+ self.run_on_gateway('scp {} {}'.format(what_to_copy, remote_path),
+ enable_cache=False,
+ use_sudo=None)
+
+ def make_remote_dir_on_host(self, ssh, host, path, path_is_file=False):
+ # make sure we have write permissions in target directories
+ dir_path = path
+ if path_is_file:
+ dir_path = os.path.dirname(dir_path)
+ cmd = 'sudo mkdir -p ' + dir_path
+ try:
+ self.run(cmd, ssh_to_host=host, ssh=ssh)
+ except timeout:
+ self.log.error('timed out trying to create directory {} on host {}'
+ .format(dir_path, host))
+ return
+ cmd = 'sudo chown -R ' + ssh.get_user() + ' ' + dir_path
+ self.run(cmd, ssh_to_host=host, ssh=ssh)
+
+ def make_remote_dir(self, host, path, path_is_file=False):
+ ssh = self.get_ssh(host, for_sftp=True)
+ self.make_remote_dir_on_host(ssh, host, path, path_is_file)
+
+ def copy_to_remote_host(self, host, local_path, remote_path, mode=None,
+ make_remote_dir=True):
+ # copy the local file to the preparation folder for the remote host
+ # on the gateway host
+ ssh = self.get_ssh(host)
+ gateway_host = ssh.get_gateway_host(host)
+ if make_remote_dir:
+ self.make_remote_dir(gateway_host, remote_path, path_is_file=True)
+ ftp_ssh = self.get_ssh(gateway_host, for_sftp=True)
+ ftp_ssh.copy_file(local_path, remote_path, mode)
+
+ def write_to_remote_host(self, host, local_path=None, remote_path=None):
+ remote_path = remote_path if remote_path else local_path
+ self.copy_to_remote_host(host, local_path, remote_path)
+
+ def write_to_server(self, local_path, remote_path=None, is_container=False):
+ host = self.env_monitoring_config['server_ip']
+ ssh = self.get_ssh(host, is_container=is_container)
+ remote_path = remote_path if remote_path else local_path
+ self.make_remote_dir_on_host(ssh, host, remote_path, True)
+ # copy to config dir first
+ ftp_ssh = self.get_ssh(host, is_container=is_container, for_sftp=True)
+ ftp_ssh.copy_file(local_path, remote_path)
+
+ @staticmethod
+ def write_to_local_host(file_path, content):
+ f = open(file_path, "w")
+ f.write(content)
+ f.close()
+ return file_path
+
+ def get_file(self, host, remote_path, local_path):
+ ftp_ssh = self.get_ssh(host, for_sftp=True)
+ ftp_ssh.copy_file_from_remote(remote_path, local_path)
+
diff --git a/app/monitoring/setup/monitoring_host.py b/app/monitoring/setup/monitoring_host.py
new file mode 100644
index 0000000..800b5a2
--- /dev/null
+++ b/app/monitoring/setup/monitoring_host.py
@@ -0,0 +1,91 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import copy
+import os
+from os.path import join, sep
+
+from monitoring.setup.monitoring_handler import MonitoringHandler
+
+RABBITMQ_CONFIG_FILE = 'rabbitmq.json'
+RABBITMQ_CONFIG_ATTR = 'rabbitmq'
+
+RABBITMQ_CERT_FILE_ATTR = 'cert_chain_file'
+RABBITMQ_PK_FILE_ATTR = 'private_key_file'
+TMP_FILES_DIR = '/tmp'
+
+
+class MonitoringHost(MonitoringHandler):
+
+ def __init__(self, env):
+ super().__init__(env)
+
+ # add monitoring setup for remote host
+ def create_setup(self, o):
+ sensu_host_files = [
+ 'transport.json',
+ 'rabbitmq.json',
+ 'client.json'
+ ]
+ server_ip = self.env_monitoring_config['server_ip']
+ host_id = o['host']
+ sub_dir = join('/host', host_id)
+ config = copy.copy(self.env_monitoring_config)
+ env_name = self.configuration.env_name
+ client_name = env_name + '-' + o['id']
+ client_ip = o['ip_address'] if 'ip_address' in o else o['id']
+ self.replacements.update(config)
+ self.replacements.update({
+ 'server_ip': server_ip,
+ 'client_name': client_name,
+ 'client_ip': client_ip,
+ 'env_name': env_name
+ })
+
+ # copy configuration files
+ for file_name in sensu_host_files:
+ content = self.prepare_config_file(file_name, {'side': 'client'})
+ self.get_ssl_files(host_id, file_name, content)
+ self.write_config_file(file_name, sub_dir, host_id, content)
+
+ if self.provision < self.provision_levels['deploy']:
+ return
+
+ self.track_setup_changes(host_id, False, "", "scripts", None)
+
+ # mark this environment as prepared
+ self.configuration.update_env({'monitoring_setup_done': True})
+
+ def get_ssl_files(self, host, file_type, content):
+ if self.fetch_ssl_files:
+ return # already got names of SSL files
+ if file_type != RABBITMQ_CONFIG_FILE:
+ return
+ if not isinstance(content, dict):
+ self.log.warn('invalid content of {}'.format(RABBITMQ_CONFIG_FILE))
+ return
+ config = content['config']
+ if not config:
+ self.log.warn('invalid content of {}'.format(RABBITMQ_CONFIG_FILE))
+ return
+ if RABBITMQ_CONFIG_ATTR not in config:
+ self.log.warn('invalid content of {}'.format(RABBITMQ_CONFIG_FILE))
+ return
+ ssl_conf = config.get(RABBITMQ_CONFIG_ATTR).get('ssl')
+ if not ssl_conf:
+ return # SSL not used
+
+ for path_attr in [RABBITMQ_CERT_FILE_ATTR, RABBITMQ_PK_FILE_ATTR]:
+ path = ssl_conf.get(path_attr)
+ if not path:
+ self.log.error('missing SSL path {}'.format(path_attr))
+ return
+ # this configuration requires SSL
+ # keep the path of the files for later use
+ self.fetch_ssl_files.append(path)
diff --git a/app/monitoring/setup/monitoring_link_vnic_vconnector.py b/app/monitoring/setup/monitoring_link_vnic_vconnector.py
new file mode 100644
index 0000000..18f04e4
--- /dev/null
+++ b/app/monitoring/setup/monitoring_link_vnic_vconnector.py
@@ -0,0 +1,37 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from monitoring.setup.monitoring_check_handler import MonitoringCheckHandler
+
+
+class MonitoringLinkVnicVconnector(MonitoringCheckHandler):
+
+ def __init__(self, env):
+ super().__init__(env)
+
+ # add monitoring setup for remote host
+ def create_setup(self, link):
+ vnic = self.inv.get_by_id(self.env, link['source_id'])
+ if not vnic:
+ self.log.error('could not find vnic for vnic-vconnector link')
+ return
+ if 'mac_address' not in vnic:
+ self.log.error('could not find MAC address in vNIC: ' + vnic['id'])
+ return
+ vconnector = self.inv.get_by_id(self.env, link['target_id'])
+        if not vconnector:
+ self.log.error('could not find vconnector for vnic-vconnector link')
+ return
+ values = {
+ 'linktype': 'vnic-vconnector',
+ 'fromobjid': self.encode_special_characters(vnic['id']),
+ 'toobjid': vconnector['id'],
+ 'bridge': vconnector['object_name'],
+ 'mac_address': vnic['mac_address']}
+ self.create_monitoring_for_object(link, values)
diff --git a/app/monitoring/setup/monitoring_otep.py b/app/monitoring/setup/monitoring_otep.py
new file mode 100644
index 0000000..366bd77
--- /dev/null
+++ b/app/monitoring/setup/monitoring_otep.py
@@ -0,0 +1,34 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from monitoring.setup.monitoring_check_handler import MonitoringCheckHandler
+
+
+class MonitoringOtep(MonitoringCheckHandler):
+
+ def __init__(self, env):
+ super().__init__(env)
+
+ # add monitoring setup for remote host
+ def create_setup(self, o):
+ if o['ports']:
+ for port in o['ports'].values():
+ self.create_monitoring_for_otep_port(o, port)
+
+ def create_monitoring_for_otep_port(self, o, port):
+ if port['type'] not in ['vxlan', 'gre']:
+ return # we only handle vxlan and gre
+ opt = port['options']
+ values = {
+ "objtype": "otep",
+ "objid": o['id'],
+ "portid": port['name'],
+ "otep_src_ip": opt['local_ip'],
+ "otep_dest_ip": opt['remote_ip']}
+ self.create_monitoring_for_object(o, values)
diff --git a/app/monitoring/setup/monitoring_pnic.py b/app/monitoring/setup/monitoring_pnic.py
new file mode 100644
index 0000000..d64c8ff
--- /dev/null
+++ b/app/monitoring/setup/monitoring_pnic.py
@@ -0,0 +1,21 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from monitoring.setup.monitoring_simple_object import MonitoringSimpleObject
+
+
+class MonitoringPnic(MonitoringSimpleObject):
+
+ def __init__(self, env):
+ super().__init__(env)
+
+ # add monitoring setup for a pNIC (switch pNICs are skipped)
+ def create_setup(self, o):
+ if o.get("pnic_type") != "switch":
+ self.setup('pnic', o)
diff --git a/app/monitoring/setup/monitoring_setup_manager.py b/app/monitoring/setup/monitoring_setup_manager.py
new file mode 100644
index 0000000..d6ada33
--- /dev/null
+++ b/app/monitoring/setup/monitoring_setup_manager.py
@@ -0,0 +1,84 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# handle adding of monitoring setup as needed
+
+from monitoring.setup.monitoring_handler import MonitoringHandler
+from monitoring.setup.monitoring_host import MonitoringHost
+from monitoring.setup.monitoring_link_vnic_vconnector \
+ import MonitoringLinkVnicVconnector
+from monitoring.setup.monitoring_pnic import MonitoringPnic
+from monitoring.setup.monitoring_otep import MonitoringOtep
+from monitoring.setup.monitoring_vedge import MonitoringVedge
+from monitoring.setup.monitoring_vnic import MonitoringVnic
+from monitoring.setup.monitoring_vservice import MonitoringVservice
+
+
+class MonitoringSetupManager(MonitoringHandler):
+
+ object_handlers = None
+
+ def __init__(self, env):
+ super().__init__(env)
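+ # map inventory object types (and link types) to their setup handlers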
+ self.object_handlers = {
+ "host": MonitoringHost(env),
+ "otep": MonitoringOtep(env),
+ "vedge": MonitoringVedge(env),
+ "pnic": MonitoringPnic(env),
+ "vnic": MonitoringVnic(env),
+ "vservice": MonitoringVservice(env),
+ "vnic-vconnector": MonitoringLinkVnicVconnector(env)}
+
+ # add monitoring setup to Sensu server
+ def server_setup(self):
+ if self.provision == self.provision_levels['none']:
+ self.log.debug('Monitoring config setup skipped')
+ return
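+ # config files rendered and pushed to the Sensu server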
+ sensu_server_files = [
+ 'transport.json',
+ 'client.json',
+ 'rabbitmq.json',
+ 'handlers.json',
+ 'redis.json',
+ 'api.json'
+ ]
+ conf = self.env_monitoring_config
+ is_container = bool(conf.get('ssh_user', ''))
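+ # the Sensu server is treated as containerized when an ssh_user is configured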
+ server_host = conf['server_ip']
+ sub_dir = 'server'
+ self.replacements.update(conf)
+ for file_name in sensu_server_files:
+ content = self.prepare_config_file(file_name, {'side': 'server'})
+ self.write_config_file(file_name, sub_dir, server_host, content,
+ is_container=is_container, is_server=True)
+ # restart the Sensu server and Uchiwa services
+ # so they pick up the new setup
+ self.restart_service(host=server_host, service='sensu-server',
+ is_server=True,
+ msg='restart sensu-server on {}'
+ .format(server_host))
+ self.restart_service(host=server_host, service='uchiwa',
+ is_server=True,
+ msg='restart uchiwa on {}'
+ .format(server_host))
+ self.configuration.update_env({'monitoring_setup_done': True})
+
+ # add setup for inventory object
+ def create_setup(self, o):
+ if self.provision == self.provision_levels['none']:
+ self.log.debug('Monitoring config setup skipped')
+ return
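+ # links carry 'link_type' instead of 'type'; use whichever attribute is present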
+ type_attribute = 'type' if 'type' in o else 'link_type'
+ type_value = o[type_attribute]
+ object_handler = self.object_handlers.get(type_value)
+ if object_handler:
+ object_handler.create_setup(o)
+
+ def simulate_track_changes(self):
+ self.add_changes_for_all_clients()
diff --git a/app/monitoring/setup/monitoring_simple_object.py b/app/monitoring/setup/monitoring_simple_object.py
new file mode 100644
index 0000000..a66abe0
--- /dev/null
+++ b/app/monitoring/setup/monitoring_simple_object.py
@@ -0,0 +1,25 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from monitoring.setup.monitoring_check_handler import MonitoringCheckHandler
+
+
+class MonitoringSimpleObject(MonitoringCheckHandler):
+
+ def __init__(self, env):
+ super().__init__(env)
+
+ # generic monitoring setup for a simple inventory object
+ def setup(self, type: str, o: dict, values: dict = None):
+ if not values:
+ values = {}
+ values['objtype'] = type
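+ # use the object id unless an explicit objid was supplied, encoding special characters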
+ objid = values.get('objid', o['id'])
+ values['objid'] = self.encode_special_characters(objid)
+ self.create_monitoring_for_object(o, values)
diff --git a/app/monitoring/setup/monitoring_vedge.py b/app/monitoring/setup/monitoring_vedge.py
new file mode 100644
index 0000000..144ee3a
--- /dev/null
+++ b/app/monitoring/setup/monitoring_vedge.py
@@ -0,0 +1,19 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from monitoring.setup.monitoring_simple_object import MonitoringSimpleObject
+
+
+class MonitoringVedge(MonitoringSimpleObject):
+
+ def __init__(self, env):
+ super().__init__(env)
+
+ def create_setup(self, o):
+ self.setup('vedge', o)
diff --git a/app/monitoring/setup/monitoring_vnic.py b/app/monitoring/setup/monitoring_vnic.py
new file mode 100644
index 0000000..7c229f8
--- /dev/null
+++ b/app/monitoring/setup/monitoring_vnic.py
@@ -0,0 +1,20 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from monitoring.setup.monitoring_simple_object import MonitoringSimpleObject
+
+
+class MonitoringVnic(MonitoringSimpleObject):
+
+ def __init__(self, env):
+ super().__init__(env)
+
+ # add monitoring setup for a vNIC
+ def create_setup(self, o):
+ self.setup('vnic', o, values={'vnictype': o['vnic_type']})
diff --git a/app/monitoring/setup/monitoring_vservice.py b/app/monitoring/setup/monitoring_vservice.py
new file mode 100644
index 0000000..8313b9b
--- /dev/null
+++ b/app/monitoring/setup/monitoring_vservice.py
@@ -0,0 +1,23 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from monitoring.setup.monitoring_simple_object import MonitoringSimpleObject
+
+
+class MonitoringVservice(MonitoringSimpleObject):
+
+ def __init__(self, env):
+ super().__init__(env)
+
+ def create_setup(self, o):
+ values = {
+ 'local_service_id': o['local_service_id'],
+ 'service_type': o['service_type']
+ }
+ self.setup('vservice', o, values=values)
diff --git a/app/statistics/stats_consumer.py b/app/statistics/stats_consumer.py
new file mode 100755
index 0000000..e0a7d46
--- /dev/null
+++ b/app/statistics/stats_consumer.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+import argparse
+import json
+import time
+
+from kafka import KafkaConsumer
+
+from discover.configuration import Configuration
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+from utils.mongo_access import MongoAccess
+
+
+class StatsConsumer(MongoAccess):
+ default_env = "WebEX-Mirantis@Cisco"
+
+ def __init__(self):
+ self.get_args()
+ MongoAccess.set_config_file(self.args.mongo_config)
+ MongoAccess.__init__(self)
+ self.log = FullLogger()
+ self.log.set_loglevel(self.args.loglevel)
+ self.conf = Configuration()
+ self.inv = InventoryMgr()
+ self.inv.set_collections(self.args.inventory)
+ stats_coll = self.inv.get_coll_name('statistics')
+ self.stats = self.db[stats_coll]
+ # consume messages from the VPP.stats Kafka topic
+ self.consumer = KafkaConsumer('VPP.stats',
+ group_id='calipso_test',
+ auto_offset_reset=self.args.offset,
+ bootstrap_servers=['localhost:9092'])
+
+ def get_args(self):
+ # read consumer configuration from command line parameters
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
+ default="",
+ help="name of config file " +
+ "with MongoDB servr access details")
+ parser.add_argument("-e", "--env", nargs="?", type=str,
+ default=self.default_env,
+ help="name of environment to scan \n" +
+ "(default: " + self.default_env + ")")
+ parser.add_argument("-y", "--inventory", nargs="?", type=str,
+ default="inventory",
+ help="name of inventory collection \n" +
+ "(default: 'inventory')")
+ parser.add_argument("-l", "--loglevel", nargs="?", type=str,
+ default="INFO",
+ help="logging level \n(default: 'INFO')")
+ parser.add_argument("-o", "--offset", nargs="?", type=str,
+ default="largest",
+ help="where to start reading" +
+ " - use 'smallest' for start \n" +
+ "(default: 'largest')")
+ self.args = parser.parse_args()
+
+ def read(self):
+ for kafka_msg in self.consumer:
+ msg = json.loads(kafka_msg.value.decode())
+ self.add_stats(msg)
+
+ def add_stats(self, msg):
+ host_ip = msg['hostIp']
+ search = {
+ 'environment': self.args.env,
+ 'type': 'host',
+ 'ip_address': host_ip
+ }
+ host = self.inv.find_items(search, get_single=True)
+ if not host:
+ self.log.error('could not find host with ip address=' + host_ip)
+ return
+ host_id = host['id']
+ search = {
+ 'environment': self.args.env,
+ 'type': 'vedge',
+ 'host': host_id
+ }
+ vedge = self.inv.find_items(search, get_single=True)
+ if not vedge:
+ self.log.error('could not find vEdge for host: ' + host_id)
+ return
+ self.log.info('setting VPP stats for vEdge of host: ' + host_id)
+ self.add_stats_for_object(vedge, msg)
+
+ def add_stats_for_object(self, o, msg):
+ msg['type'] = 'vedge_flows'
+ msg['environment'] = self.args.env
+ msg['object_type'] = o['type']
+ msg['object_id'] = o['id']
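+ # averageArrivalNanoSeconds holds the sample time as epoch nanoseconds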
+ time_seconds = int(msg['averageArrivalNanoSeconds'] / 1000000000)
+ sample_time = time.gmtime(time_seconds)
+ msg['sample_time'] = time.strftime("%Y-%m-%dT%H:%M:%SZ", sample_time)
+ # find instances between which the flow happens
+ # to find the instance, find the related vNIC first
+ msg['source'] = self.find_instance_for_stat('source', msg)
+ msg['destination'] = self.find_instance_for_stat('destination', msg)
+ self.stats.insert_one(msg)
+
+ def find_instance_for_stat(self, direction, msg):
+ search_by_mac_address = 'sourceMacAddress' in msg
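+ # the message identifies endpoints either by MAC address or by IP address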
+ value_attr = 'MacAddress' if search_by_mac_address else 'IpAddress'
+ value_to_search = msg[direction + value_attr]
+ attr = 'mac_address' if search_by_mac_address else 'ip_address'
+ search = {
+ 'environment': self.args.env,
+ 'type': 'vnic',
+ attr: value_to_search
+ }
+ vnic = self.inv.find_items(search, get_single=True)
+ if not vnic:
+ self.log.error('failed to find vNIC for ' +
+ attr + '=' + value_to_search)
+ return 'Unknown'
+ # now find the instance name from the vnic name
+ name_path = vnic['name_path'].split('/')
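+ # the instance name is assumed to be the 9th component (index 8) of the vNIC name_path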
+ instance_name = name_path[8]
+ return instance_name
+
+if __name__ == '__main__':
+ stats_consumer = StatsConsumer()
+ stats_consumer.read()
diff --git a/app/test/__init__.py b/app/test/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/test/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/test/api/__init__.py b/app/test/api/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/test/api/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/test/api/responders_test/__init__.py b/app/test/api/responders_test/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/test/api/responders_test/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/test/api/responders_test/auth/__init__.py b/app/test/api/responders_test/auth/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/test/api/responders_test/auth/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/test/api/responders_test/auth/test_tokens.py b/app/test/api/responders_test/auth/test_tokens.py
new file mode 100644
index 0000000..d7b9675
--- /dev/null
+++ b/app/test/api/responders_test/auth/test_tokens.py
@@ -0,0 +1,105 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+from unittest.mock import patch
+
+from test.api.responders_test.test_data import base
+
+from test.api.responders_test.test_data import tokens
+from test.api.test_base import TestBase
+
+
+class TestTokens(TestBase):
+
+ def test_create_token_without_auth_obj(self):
+ self.validate_post_request(tokens.URL,
+ body=json.dumps(tokens.AUTH_OBJ_WITHOUT_AUTH),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_create_token_without_methods(self):
+ self.validate_post_request(tokens.URL,
+ body=json.dumps(tokens.AUTH_OBJ_WITHOUT_METHODS),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_create_token_without_credentials_in_credentials_method(self):
+ self.validate_post_request(tokens.URL,
+ body=json.dumps(tokens.AUTH_OBJ_WITHOUT_CREDENTIALS),
+ expected_code=base.UNAUTHORIZED_CODE)
+
+ def test_create_token_without_token_in_token_method(self):
+ self.validate_post_request(tokens.URL,
+ body=json.dumps(tokens.AUTH_OBJ_WITHOUT_TOKEN),
+ expected_code=base.UNAUTHORIZED_CODE)
+
+ @patch(tokens.AUTH_VALIDATE_CREDENTIALS)
+ def test_create_token_with_wrong_credentials(self, validate_credentials):
+ self.validate_post_request(tokens.URL,
+ body=json.dumps(tokens.AUTH_OBJ_WITH_WRONG_CREDENTIALS),
+ mocks={
+ validate_credentials: False
+ },
+ expected_code=base.UNAUTHORIZED_CODE)
+
+ @patch(tokens.AUTH_VALIDATE_TOKEN)
+ def test_create_token_with_wrong_token(self, validate_token):
+ self.validate_post_request(tokens.URL,
+ body=json.dumps(tokens.AUTH_OBJ_WITH_WRONG_TOKEN),
+ mocks={
+ validate_token: 'token error'
+ },
+ expected_code=base.UNAUTHORIZED_CODE)
+
+ @patch(tokens.AUTH_WRITE_TOKEN)
+ @patch(tokens.AUTH_VALIDATE_CREDENTIALS)
+ def test_create_token_with_correct_credentials(self, validate_credentials, write_token):
+ self.validate_post_request(tokens.URL,
+ body=json.dumps(tokens.AUTH_OBJ_WITH_CORRECT_CREDENTIALS),
+ mocks={
+ validate_credentials: True,
+ write_token: None
+ },
+ expected_code=base.CREATED_CODE)
+
+ @patch(tokens.AUTH_WRITE_TOKEN)
+ @patch(tokens.AUTH_VALIDATE_TOKEN)
+ def test_create_token_with_correct_token(self, validate_token, write_token):
+ self.validate_post_request(tokens.URL,
+ body=json.dumps(tokens.AUTH_OBJ_WITH_CORRECT_TOKEN),
+ mocks={
+ validate_token: None,
+ write_token: None
+ },
+ expected_code=base.CREATED_CODE)
+
+ def test_delete_token_without_token(self):
+ self.validate_delete_request(tokens.URL,
+ headers=tokens.HEADER_WITHOUT_TOKEN,
+ expected_code=base.UNAUTHORIZED_CODE
+ )
+
+ @patch(tokens.AUTH_VALIDATE_TOKEN)
+ def test_delete_token_with_wrong_token(self, validate_token):
+ self.validate_delete_request(tokens.URL,
+ headers=tokens.HEADER_WITH_WRONG_TOKEN,
+ mocks={
+ validate_token: 'token error'
+ },
+ expected_code=base.UNAUTHORIZED_CODE)
+
+ @patch(tokens.AUTH_VALIDATE_TOKEN)
+ @patch(tokens.AUTH_DELETE_TOKEN)
+ def test_delete_token_with_correct_token(self, delete_token, validate_token):
+ self.validate_delete_request(tokens.URL,
+ headers=tokens.HEADER_WITH_CORRECT_TOKEN,
+ mocks={
+ validate_token: None,
+ delete_token: None
+ },
+ expected_code=base.SUCCESSFUL_CODE)
diff --git a/app/test/api/responders_test/resource/__init__.py b/app/test/api/responders_test/resource/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/test/api/responders_test/resource/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/test/api/responders_test/resource/test_aggregates.py b/app/test/api/responders_test/resource/test_aggregates.py
new file mode 100644
index 0000000..1b642e0
--- /dev/null
+++ b/app/test/api/responders_test/resource/test_aggregates.py
@@ -0,0 +1,103 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.test_base import TestBase
+from test.api.responders_test.test_data import base
+from test.api.responders_test.test_data import aggregates
+from unittest.mock import patch
+
+
+class TestAggregates(TestBase):
+
+ def test_get_aggregate_without_type(self):
+ self.validate_get_request(aggregates.URL,
+ params={},
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_aggregate_with_wrong_filter(self):
+ self.validate_get_request(aggregates.URL,
+ params={
+ "unknown": "unknown"
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_environment_aggregates_without_env_name(self):
+ self.validate_get_request(aggregates.URL,
+ params={
+ "type": aggregates.ENV_TYPE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ def test_get_environment_aggregates_with_unknown_env_name(self,
+ check_env_name):
+ self.validate_get_request(aggregates.URL,
+ params={
+ "type": aggregates.ENV_TYPE,
+ "env_name": base.UNKNOWN_ENV
+ },
+ mocks={
+ check_env_name: False
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_AGGREGATE)
+ def test_get_environment_aggregates_with_env_name(self, aggregates_method,
+ check_env_name):
+ self.validate_get_request(aggregates.URL,
+ params={
+ "type": aggregates.ENV_TYPE,
+ "env_name": base.ENV_NAME
+ },
+ mocks={
+ check_env_name: True,
+ aggregates_method:
+ aggregates.ENVIRONMENT_AGGREGATES
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=
+ aggregates.ENVIRONMENT_AGGREGATES_RESPONSE
+ )
+
+ @patch(base.RESPONDER_BASE_AGGREGATE)
+ def test_get_message_aggregates(self, aggregate):
+ self.validate_get_request(aggregates.URL,
+ params={
+ "type": aggregates.MESSAGE_TYPE
+ },
+ side_effects={aggregate: [
+ aggregates.MESSAGE_ENV_AGGREGATES,
+ aggregates.MESSAGE_LEVEL_AGGREGATES]
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=
+ aggregates.MESSAGE_AGGREGATES_RESPONSE
+ )
+
+ @patch(base.RESPONDER_BASE_AGGREGATE)
+ def test_get_constant_aggregates(self, aggregate):
+ self.validate_get_request(aggregates.URL,
+ params={
+ "type": aggregates.CONSTANT_TYPE
+ },
+ mocks={
+ aggregate: aggregates.CONSTANT_AGGREGATES
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=
+ aggregates.CONSTANT_AGGREGATES_RESPONSE
+ )
+
+ def test_get_unknown_aggregates(self):
+ self.validate_get_request(aggregates.URL,
+ params={
+ "type": aggregates.UNKNOWN_TYPE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
diff --git a/app/test/api/responders_test/resource/test_clique_constraints.py b/app/test/api/responders_test/resource/test_clique_constraints.py
new file mode 100644
index 0000000..f990b5c
--- /dev/null
+++ b/app/test/api/responders_test/resource/test_clique_constraints.py
@@ -0,0 +1,138 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.test_base import TestBase
+from test.api.responders_test.test_data import base
+from test.api.responders_test.test_data import clique_constraints
+from unittest.mock import patch
+
+
+class TestCliqueConstraints(TestBase):
+
+ def test_get_clique_constraints_list_with_invalid_filter(self):
+ self.validate_get_request(clique_constraints.URL,
+ params={
+ "invalid": "invalid"
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_clique_constraints_list_with_non_int_page(self):
+ self.validate_get_request(clique_constraints.URL,
+ params={
+ "page": base.NON_INT_PAGE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_constraints_list_with_int_page(self, read):
+ self.validate_get_request(clique_constraints.URL,
+ params={
+ "page": base.INT_PAGE
+ },
+ mocks={
+ read: clique_constraints.CLIQUE_CONSTRAINTS
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=clique_constraints.
+ CLIQUE_CONSTRAINTS_RESPONSE
+ )
+
+ def test_get_clique_constraints_list_with_non_int_pagesize(self):
+ self.validate_get_request(clique_constraints.URL,
+ params={
+ "page_size": base.NON_INT_PAGESIZE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_constraints_list_with_int_pagesize(self, read):
+ self.validate_get_request(clique_constraints.URL,
+ params={
+ "page_size": base.INT_PAGESIZE
+ },
+ mocks={
+ read: clique_constraints.CLIQUE_CONSTRAINTS
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=clique_constraints.
+ CLIQUE_CONSTRAINTS_RESPONSE
+ )
+
+ def test_get_clique_constraints_with_wrong_id(self):
+ self.validate_get_request(clique_constraints.URL,
+ params={
+ 'id': clique_constraints.WRONG_ID
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_constraints_with_nonexistent_id(self, read):
+ self.validate_get_request(clique_constraints.URL,
+ params={
+ "id": clique_constraints.NONEXISTENT_ID
+ },
+ mocks={
+ read: []
+ },
+ expected_code=base.NOT_FOUND_CODE
+ )
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_constraints_with_id(self, read):
+ self.validate_get_request(clique_constraints.URL,
+ params={
+ "id": clique_constraints.CORRECT_ID
+ },
+ mocks={
+ read: clique_constraints.
+ CLIQUE_CONSTRAINTS_WITH_SPECIFIC_ID
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=clique_constraints.
+ CLIQUE_CONSTRAINTS_WITH_SPECIFIC_ID[0]
+ )
+
+ def test_get_clique_constraints_list_with_wrong_focal_point_type(self):
+ self.validate_get_request(clique_constraints.URL,
+ params={
+ "focal_point_type":
+ clique_constraints.WRONG_FOCAL_POINT_TYPE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_constraints_list_with_focal_point_type(self, read):
+ self.validate_get_request(clique_constraints.URL,
+ params={
+ "focal_point_type":
+ clique_constraints.CORRECT_FOCAL_POINT_TYPE
+ },
+ mocks={
+ read: clique_constraints.
+ CLIQUE_CONSTRAINTS_WITH_SPECIFIC_FOCAL_POINT_TYPE
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=clique_constraints.
+ CLIQUE_CONSTRAINTS_WITH_SPECIFIC_FOCAL_POINT_TYPE_RESPONSE
+ )
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_constraints_list_with_constraints(self, read):
+ self.validate_get_request(clique_constraints.URL,
+ params={
+ "constraint": clique_constraints.CONSTRAINT
+ },
+ mocks={
+ read: clique_constraints.
+ CLIQUE_CONSTRAINTS_WITH_SPECIFIC_CONSTRAINT
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=clique_constraints.
+ CLIQUE_CONSTRAINTS_WITH_SPECIFIC_CONSTRAINT_RESPONSE
+ )
diff --git a/app/test/api/responders_test/resource/test_clique_types.py b/app/test/api/responders_test/resource/test_clique_types.py
new file mode 100644
index 0000000..f5e331e
--- /dev/null
+++ b/app/test/api/responders_test/resource/test_clique_types.py
@@ -0,0 +1,267 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from test.api.responders_test.test_data import base
+from test.api.test_base import TestBase
+from test.api.responders_test.test_data import clique_types
+from unittest.mock import patch
+
+
+class TestCliqueTypes(TestBase):
+
+ def test_get_clique_types_list_without_env_name(self):
+ self.validate_get_request(clique_types.URL,
+ params={},
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_clique_types_with_invalid_filter(self):
+ self.validate_get_request(clique_types.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "invalid": "invalid"
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_clique_type_with_wrong_id(self):
+ self.validate_get_request(clique_types.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "id": clique_types.WRONG_ID
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_type_with_id(self, read):
+ self.validate_get_request(clique_types.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "id": clique_types.CORRECT_ID
+ },
+ mocks={
+ read: clique_types.CLIQUE_TYPES_WITH_SPECIFIC_ID
+ },
+ expected_response=clique_types.
+ CLIQUE_TYPES_WITH_SPECIFIC_ID[0],
+ expected_code=base.SUCCESSFUL_CODE
+ )
+
+ def test_get_clique_types_list_with_wrong_focal_point_type(self):
+ self.validate_get_request(clique_types.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "focal_point_type": clique_types.WRONG_FOCAL_POINT_TYPE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_types_list_with_correct_focal_point_type(self, read):
+ self.validate_get_request(clique_types.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "focal_point_type":
+ clique_types.CORRECT_FOCAL_POINT_POINT_TYPE
+ },
+ mocks={
+ read: clique_types.
+ CLIQUE_TYPES_WITH_SPECIFIC_FOCAL_POINT_TYPE
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=clique_types.
+ CLIQUE_TYPES_WITH_SPECIFIC_FOCAL_POINT_TYPE_RESPONSE
+ )
+
+ def test_get_clique_types_list_with_wrong_link_type(self):
+ self.validate_get_request(clique_types.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "link_type": clique_types.WRONG_LINK_TYPE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_types_list_with_correct_link_type(self, read):
+ self.validate_get_request(clique_types.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "link_type": base.CORRECT_LINK_TYPE
+ },
+ mocks={
+ read: clique_types.
+ CLIQUE_TYPES_WITH_SPECIFIC_LINK_TYPE
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=clique_types.
+ CLIQUE_TYPES_WITH_SPECIFIC_LINK_TYPE_RESPONSE
+ )
+
+ def test_get_clique_types_list_with_non_int_page(self):
+ self.validate_get_request(clique_types.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "page": base.NON_INT_PAGE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_types_list_with_int_page(self, read):
+ self.validate_get_request(clique_types.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "page": base.INT_PAGE
+ },
+ mocks={
+ read: clique_types.CLIQUE_TYPES
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=clique_types.CLIQUE_TYPES_RESPONSE)
+
+ def test_get_clique_types_list_with_non_int_page_size(self):
+ self.validate_get_request(clique_types.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "page_size": base.NON_INT_PAGESIZE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_types_list_with_int_page_size(self, read):
+ self.validate_get_request(clique_types.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "page_size": base.INT_PAGESIZE
+ },
+ mocks={
+ read: clique_types.CLIQUE_TYPES
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=clique_types.CLIQUE_TYPES_RESPONSE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_types_list_with_unknown_env_name(self, read, check_env_name):
+ self.validate_get_request(clique_types.URL,
+ params={
+ "env_name": base.UNKNOWN_ENV
+ },
+ mocks={
+ read: [],
+ check_env_name: False
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_types_list_with_env_name_and_nonexistent_link_type(self, read, check_env_name):
+ self.validate_get_request(clique_types.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "link_type": clique_types.NONEXISTENT_LINK_TYPE
+ },
+ mocks={
+ read: [],
+ check_env_name: True
+ },
+ expected_code=base.NOT_FOUND_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_type_with_unknown_env_name_and_id(self, read, check_env_name):
+ self.validate_get_request(clique_types.URL,
+ params={
+ "env_name": base.UNKNOWN_ENV,
+ "id": clique_types.NONEXISTENT_ID
+ },
+ mocks={
+ read: [],
+ check_env_name: False
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_type_with_env_name_and_nonexistent_id(self, read, check_env_name):
+ self.validate_get_request(clique_types.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "id": clique_types.NONEXISTENT_ID
+ },
+ mocks={
+ read: [],
+ check_env_name: True
+ },
+ expected_code=base.NOT_FOUND_CODE)
+
+ def test_post_clique_type_with_non_dict_clique_type(self):
+ self.validate_post_request(clique_types.URL,
+ body=json.dumps(clique_types.NON_DICT_CLIQUE_TYPE),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_clique_type_without_env_name(self):
+ self.validate_post_request(clique_types.URL,
+ body=json.dumps(clique_types.CLIQUE_TYPE_WITHOUT_ENVIRONMENT),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ def test_post_clique_type_with_unknown_env_name(self, check_environment_name):
+ self.validate_post_request(clique_types.URL,
+ mocks={
+ check_environment_name: False
+ },
+ body=json.dumps(clique_types.
+ CLIQUE_TYPE_WITH_UNKNOWN_ENVIRONMENT),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_clique_type_without_focal_point_type(self):
+ self.validate_post_request(clique_types.URL,
+ body=json.dumps(clique_types.
+ CLIQUE_TYPE_WITHOUT_FOCAL_POINT_TYPE),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_clique_type_with_wrong_focal_point_type(self):
+ self.validate_post_request(clique_types.URL,
+ body=json.dumps(clique_types.
+ CLIQUE_TYPE_WITH_WRONG_FOCAL_POINT_TYPE),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_clique_type_without_link_types(self):
+ self.validate_post_request(clique_types.URL,
+ body=json.dumps(
+ clique_types.CLIQUE_TYPE_WITHOUT_LINK_TYPES
+ ),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_clique_type_with_non_list_link_types(self):
+ self.validate_post_request(clique_types.URL,
+ body=json.dumps(clique_types.
+ CLIQUE_TYPE_WITH_NON_LIST_LINK_TYPES),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_clique_type_with_wrong_link_type(self):
+ self.validate_post_request(clique_types.URL,
+ body=json.dumps(clique_types.
+ CLIQUE_TYPE_WITH_WRONG_LINK_TYPE),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_clique_type_without_name(self):
+ self.validate_post_request(clique_types.URL,
+ body=json.dumps(clique_types.CLIQUE_TYPE_WITHOUT_NAME),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_WRITE)
+ def test_post_clique_type(self, write, check_environment_name):
+ self.validate_post_request(clique_types.URL,
+ body=json.dumps(clique_types.CLIQUE_TYPE),
+ mocks={
+ write: None,
+ check_environment_name: True
+ },
+ expected_code=base.CREATED_CODE)
diff --git a/app/test/api/responders_test/resource/test_cliques.py b/app/test/api/responders_test/resource/test_cliques.py
new file mode 100644
index 0000000..de3576b
--- /dev/null
+++ b/app/test/api/responders_test/resource/test_cliques.py
@@ -0,0 +1,240 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.test_base import TestBase
+from test.api.responders_test.test_data import base
+from test.api.responders_test.test_data import cliques
+from unittest.mock import patch
+
+
+class TestCliques(TestBase):
+
+ def test_get_cliques_list_without_env_name(self):
+ self.validate_get_request(cliques.URL,
+ params={},
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_cliques_list_with_invalid_filter(self):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "invalid": "invalid"
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_cliques_list_with_non_int_page(self):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "page": base.NON_INT_PAGE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_cliques_list_with_int_page(self, read):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "page": base.INT_PAGE
+ },
+ mocks={
+ read: cliques.CLIQUES
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=cliques.CLIQUES_RESPONSE)
+
+ def test_get_cliques_list_with_non_int_pagesize(self):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "page_size": base.NON_INT_PAGESIZE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_cliques_list_with_int_pagesize(self, read):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "page_size": base.INT_PAGESIZE
+ },
+ mocks={
+ read: cliques.CLIQUES
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=cliques.CLIQUES_RESPONSE)
+
+ def test_get_clique_with_wrong_clique_id(self):
+ self.validate_get_request(cliques.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'id': cliques.WRONG_CLIQUE_ID
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_with_clique_id(self, read):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "id": cliques.CORRECT_CLIQUE_ID
+ },
+ mocks={
+ read: cliques.CLIQUES_WITH_SPECIFIC_ID
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=cliques.CLIQUES_WITH_SPECIFIC_ID[0]
+ )
+
+ def test_get_cliques_list_with_wrong_focal_point(self):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "focal_point": cliques.WRONG_FOCAL_POINT
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_cliques_list_with_focal_point(self, read):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "focal_point": cliques.CORRECT_FOCAL_POINT
+ },
+ mocks={
+ read: cliques.CLIQUES_WITH_SPECIFIC_FOCAL_POINT
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=cliques.
+ CLIQUES_WITH_SPECIFIC_FOCAL_POINT_RESPONSE
+ )
+
+ def test_get_cliques_list_with_wrong_focal_point_type(self):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "focal_point_type": cliques.WRONG_FOCAL_POINT_TYPE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_cliques_list_with_focal_point_type(self, read):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "focal_point_type": cliques.CORRECT_FOCAL_POINT_TYPE
+ },
+ mocks={
+ read: cliques.CLIQUES_WITH_SPECIFIC_FOCAL_POINT_TYPE
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=cliques.
+ CLIQUES_WITH_SPECIFIC_FOCAL_POINT_TYPE_RESPONSE
+ )
+
+ def test_get_cliques_list_with_wrong_link_type(self):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "link_type": base.WRONG_LINK_TYPE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_cliques_list_with_link_type(self, read):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "link_type": cliques.CORRECT_LINK_TYPE
+ },
+ mocks={
+ read: cliques.CLIQUES_WITH_SPECIFIC_LINK_TYPE
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=cliques.
+ CLIQUES_WITH_SPECIFIC_LINK_TYPE_RESPONSE
+ )
+
+ def test_get_cliques_list_with_wrong_link_id(self):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "link_id": cliques.WRONG_LINK_ID
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_ids_with_correct_link_id(self, read):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "link_id": cliques.CORRECT_LINK_ID
+ },
+ mocks={
+ read: cliques.CLIQUES_WITH_SPECIFIC_LINK_ID
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=cliques.
+ CLIQUES_WITH_SPECIFIC_LINK_ID_RESPONSE
+ )
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_cliques_list_with_env_name_and_nonexistent_link_id(self, read, check_env_name):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "link_id": cliques.NONEXISTENT_LINK_ID
+ },
+ mocks={
+ read: [],
+ check_env_name: True
+ },
+ expected_code=base.NOT_FOUND_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_cliques_list_with_unknown_env_name(self, read, check_env_name):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.UNKNOWN_ENV
+ },
+ mocks={
+ read: [],
+ check_env_name: False
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_with_env_name_and_nonexistent_clique_id(self, read, check_env_name):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "id": cliques.NONEXISTENT_CLIQUE_ID
+ },
+ mocks={
+ read: [],
+ check_env_name: True
+ },
+ expected_code=base.NOT_FOUND_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_with_unknown_env_name_and_clique_id(self, read, check_env_name):
+ self.validate_get_request(cliques.URL,
+ params={
+ "env_name": base.UNKNOWN_ENV,
+ "id": cliques.NONEXISTENT_CLIQUE_ID
+ },
+ mocks={
+ read: [],
+ check_env_name: False
+ },
+ expected_code=base.BAD_REQUEST_CODE)
diff --git a/app/test/api/responders_test/resource/test_constants.py b/app/test/api/responders_test/resource/test_constants.py
new file mode 100644
index 0000000..0d92ebe
--- /dev/null
+++ b/app/test/api/responders_test/resource/test_constants.py
@@ -0,0 +1,53 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.test_base import TestBase
+from test.api.responders_test.test_data import base
+from test.api.responders_test.test_data import constants
+from unittest.mock import patch
+
+
+class TestConstants(TestBase):
+
+ def test_get_constant_without_name(self):
+ self.validate_get_request(constants.URL,
+ params={},
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_constant_with_unknown_filter(self):
+ self.validate_get_request(constants.URL,
+ params={
+ "unknown": "unknown"
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_constant_with_unknown_name(self, read):
+ self.validate_get_request(constants.URL,
+ params={
+ "name": constants.UNKNOWN_NAME
+ },
+ mocks={
+ read: []
+ },
+ expected_code=base.NOT_FOUND_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_constant(self, read):
+ self.validate_get_request(constants.URL,
+ params={
+ "name": constants.NAME
+ },
+ mocks={
+ read: constants.CONSTANTS_WITH_SPECIFIC_NAME
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=constants.
+ CONSTANTS_WITH_SPECIFIC_NAME[0]
+ )
diff --git a/app/test/api/responders_test/resource/test_environment_configs.py b/app/test/api/responders_test/resource/test_environment_configs.py
new file mode 100644
index 0000000..7002ed7
--- /dev/null
+++ b/app/test/api/responders_test/resource/test_environment_configs.py
@@ -0,0 +1,420 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from test.api.responders_test.test_data import base
+from test.api.test_base import TestBase
+from test.api.responders_test.test_data import environment_configs
+from utils.constants import EnvironmentFeatures
+from utils.inventory_mgr import InventoryMgr
+from unittest.mock import patch
+
+
+class TestEnvironmentConfigs(TestBase):
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_environment_configs_list(self, read):
+ self.validate_get_request(environment_configs.URL,
+ params={},
+ mocks={
+ read: environment_configs.ENV_CONFIGS
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=environment_configs.
+ ENV_CONFIGS_RESPONSE
+ )
+
+ def test_get_environment_configs_list_with_invalid_filters(self):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "unknown": "unknown"
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_environment_configs_list_with_name(self, read):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "name": environment_configs.NAME
+ },
+ mocks={
+ read: environment_configs.
+ ENV_CONFIGS_WITH_SPECIFIC_NAME
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=environment_configs.
+ ENV_CONFIGS_WITH_SPECIFIC_NAME[0]
+ )
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_environment_configs_list_with_unknown_name(self, read):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "name": environment_configs.UNKNOWN_NAME
+ },
+ mocks={
+ read: []
+ },
+ expected_code=base.NOT_FOUND_CODE)
+
+ def test_get_environment_configs_list_with_wrong_distribution(self):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "distribution":
+ environment_configs.WRONG_DISTRIBUTION
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_environment_configs_list_with_distribution(self, read):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "distribution":
+ environment_configs.CORRECT_DISTRIBUTION
+ },
+ mocks={
+ read: environment_configs.
+ ENV_CONFIGS_WITH_SPECIFIC_DISTRIBUTION
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=environment_configs.
+ ENV_CONFIGS_WITH_SPECIFIC_DISTRIBUTION_RESPONSE)
+
+ def test_get_environment_configs_list_with_wrong_mechanism_driver(self):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "mechanism_drivers":
+ environment_configs.WRONG_MECHANISM_DRIVER
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_environment_configs_list_with_mechanism_driver(self, read):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "mechanism_drivers":
+ environment_configs.CORRECT_MECHANISM_DRIVER
+ },
+ mocks={
+ read: environment_configs.
+ ENV_CONFIGS_WITH_SPECIFIC_MECHANISM_DRIVER
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=environment_configs.
+ ENV_CONFIGS_WITH_SPECIFIC_MECHANISM_DRIVER_RESPONSE
+ )
+
+ def test_get_environment_configs_list_with_wrong_type_driver(self):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "type_drivers":
+ environment_configs.WRONG_TYPE_DRIVER
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_environment_configs_list_with_type_driver(self, read):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "type_drivers":
+ environment_configs.CORRECT_TYPE_DRIVER
+ },
+ mocks={
+ read: environment_configs.
+ ENV_CONFIGS_WITH_SPECIFIC_TYPE_DRIVER
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=environment_configs.
+ ENV_CONFIGS_WITH_SPECIFIC_TYPE_DRIVER_RESPONSE
+ )
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_environment_configs_list_with_user(self, read):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "user": environment_configs.USER
+ },
+ mocks={
+ read: environment_configs.
+ ENV_CONFIGS_WITH_SPECIFIC_USER
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=environment_configs.
+ ENV_CONFIGS_WITH_SPECIFIC_USER_RESPONSE
+ )
+
+ def test_get_environment_configs_list_with_non_bool_listen(self):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "listen": environment_configs.NON_BOOL_LISTEN
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_environment_configs_list_with_bool_listen(self, read):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "listen": environment_configs.BOOL_LISTEN
+ },
+ mocks={
+ read: environment_configs.
+ ENV_CONFIGS_WITH_SPECIFIC_LISTEN
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=environment_configs.
+ ENV_CONFIGS_WITH_SPECIFIC_LISTEN_RESPONSE
+ )
+
+ def test_get_environment_configs_list_with_non_bool_scanned(self):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "scanned": environment_configs.
+ NON_BOOL_SCANNED
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_environment_configs_list_with_bool_scanned(self, read):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "scanned": environment_configs.BOOL_SCANNED
+ },
+ mocks={
+ read: environment_configs.
+ ENV_CONFIGS_WITH_SPECIFIC_SCANNED
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=environment_configs.
+ ENV_CONFIGS_WITH_SPECIFIC_SCANNED_RESPONSE
+ )
+
+ def test_get_environment_configs_list_with_non_bool_monitoring_setup_done(self):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "listen": environment_configs.
+ NON_BOOL_MONITORING_SETUP_DONE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_environment_configs_list_with_bool_monitoring_setup_done(self, read):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "scanned": environment_configs.
+ BOOL_MONITORING_SETUP_DONE
+ },
+ mocks={
+ read: environment_configs.
+ ENV_CONFIGS_WITH_SPECIFIC_MONITORING_SETUP_DONE
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=environment_configs.
+ ENV_CONFIGS_WITH_SPECIFIC_MONITORING_SETUP_DONE_RESPONSE
+ )
+
+ def test_get_environment_configs_list_with_non_int_page(self):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "page": base.NON_INT_PAGE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_environment_configs_list_with_int_page(self, read):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "page": base.INT_PAGE
+ },
+ mocks={
+ read: environment_configs.ENV_CONFIGS
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=environment_configs.
+ ENV_CONFIGS_RESPONSE
+ )
+
+ def test_get_environment_configs_list_with_non_int_page_size(self):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "page_size": base.NON_INT_PAGESIZE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_environment_configs_list_with_int_page_size(self, read):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "page_size": base.INT_PAGESIZE
+ },
+ mocks={
+ read: environment_configs.ENV_CONFIGS
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=environment_configs.
+ ENV_CONFIGS_RESPONSE
+ )
+
+ def test_post_environment_config_without_app_path(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
+ deleted_keys=["app_path"])
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_without_configuration(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
+ deleted_keys=["configuration"])
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_without_distribution(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
+ deleted_keys=["distribution"])
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_with_wrong_distribution(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
+ updates={"distribution": environment_configs.WRONG_DISTRIBUTION})
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_without_listen(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
+ deleted_keys=["listen"])
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_with_wrong_listen(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
+ updates={"listen": environment_configs.NON_BOOL_LISTEN})
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_without_mechanism_driver(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
+ deleted_keys=["mechanism_drivers"])
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_with_wrong_mechanism_driver(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
+ updates={
+ "mechanism_drivers":
+ [environment_configs.WRONG_MECHANISM_DRIVER]
+ })
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_without_name(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
+ deleted_keys=["name"])
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_without_operational(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
+ deleted_keys=["operational"])
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_with_wrong_scanned(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
+ updates={
+ "scanned": environment_configs.NON_BOOL_SCANNED
+ })
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_with_wrong_last_scanned(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
+ updates={
+ "last_scanned": base.WRONG_FORMAT_TIME
+ })
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_without_type(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
+ deleted_keys=["type"])
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_without_type_drivers(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
+ deleted_keys=["type_drivers"])
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_with_wrong_type_drivers(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
+ updates={
+ "type_drivers": [environment_configs.WRONG_TYPE_DRIVER]
+ })
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
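+    # Helper (not a test): stubs InventoryMgr.is_feature_supported_in_env so
+    # each POST test below controls which features (scanning, monitoring,
+    # listening) the responder sees as supported for the environment.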
+ def mock_validate_env_config_with_supported_envs(self, scanning,
+ monitoring, listening):
+        InventoryMgr.is_feature_supported_in_env = (
+            lambda self, matches, feature: {
+                EnvironmentFeatures.SCANNING: scanning,
+                EnvironmentFeatures.MONITORING: monitoring,
+                EnvironmentFeatures.LISTENING: listening
+            }[feature])
+
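+    # Happy path: all features are reported as supported and the DB write is
+    # mocked out, so the responder is expected to return 201 (created).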
+ @patch(base.RESPONDER_BASE_WRITE)
+ def test_post_environment_config(self, write):
+ self.mock_validate_env_config_with_supported_envs(True, True, True)
+ self.validate_post_request(environment_configs.URL,
+ mocks={
+ write: None
+ },
+ body=json.dumps(environment_configs.ENV_CONFIG),
+ expected_code=base.CREATED_CODE)
+
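+    # Each case below reports exactly one feature as unsupported, so the POST
+    # is expected to be rejected with a 400 (bad request) response.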
+ def test_post_unsupported_environment_config(self):
+ test_cases = [
+ {
+ "scanning": False,
+ "monitoring": True,
+ "listening": True
+ },
+ {
+ "scanning": True,
+ "monitoring": False,
+ "listening": True
+ },
+ {
+ "scanning": True,
+ "monitoring": True,
+ "listening": False
+ }
+ ]
+ for test_case in test_cases:
+            self.mock_validate_env_config_with_supported_envs(
+                test_case["scanning"],
+                test_case["monitoring"],
+                test_case["listening"])
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(environment_configs.ENV_CONFIG),
+ expected_code=base.BAD_REQUEST_CODE)
diff --git a/app/test/api/responders_test/resource/test_inventory.py b/app/test/api/responders_test/resource/test_inventory.py
new file mode 100644
index 0000000..0ef9089
--- /dev/null
+++ b/app/test/api/responders_test/resource/test_inventory.py
@@ -0,0 +1,162 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.responders_test.test_data import base
+from test.api.responders_test.test_data import inventory
+from test.api.test_base import TestBase
+from unittest.mock import patch
+
+
+class TestInventory(TestBase):
+
+ def test_get_objects_list_without_env_name(self):
+ self.validate_get_request(inventory.URL,
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_objects_list_with_invalid_filter(self):
+ self.validate_get_request(inventory.URL,
+ params={
+ "invalid": "invalid"
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_objects_list_with_non_boolean_subtree(self):
+ self.validate_get_request(inventory.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'sub_tree': base.NON_BOOL
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_objects_list_with_boolean_subtree(self, read):
+ self.validate_get_request(inventory.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'sub_tree': base.BOOL
+ },
+ mocks={
+ read: inventory.OBJECTS_LIST
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=inventory.OBJECT_IDS_RESPONSE
+ )
+
+ def test_get_objects_list_with_non_int_page(self):
+ self.validate_get_request(inventory.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'page': base.NON_INT_PAGE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_objects_list_with_int_page(self, read):
+ self.validate_get_request(inventory.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'page': base.INT_PAGE
+ },
+ mocks={
+ read: inventory.OBJECTS_LIST
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=inventory.OBJECT_IDS_RESPONSE
+ )
+
+ def test_get_objects_list_with_non_int_pagesize(self):
+ self.validate_get_request(inventory.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'page_size': base.NON_INT_PAGESIZE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_objects_list_with_int_pagesize(self, read):
+ self.validate_get_request(inventory.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'page_size': base.INT_PAGESIZE
+ },
+ mocks={
+ read: inventory.OBJECTS_LIST
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=inventory.OBJECT_IDS_RESPONSE
+ )
+
+ @patch(base.RESPONDER_BASE_READ)
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ def test_get_nonexistent_objects_list_with_env_name(self, check_env_name, read):
+ self.validate_get_request(inventory.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ },
+ mocks={
+ read: [],
+ check_env_name: True
+ },
+ expected_code=base.NOT_FOUND_CODE,
+ )
+
+ @patch(base.RESPONDER_BASE_READ)
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+    def test_get_objects_list_with_unknown_env_name(self, check_env_name, read):
+ self.validate_get_request(inventory.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ },
+ mocks={
+ read: [],
+ check_env_name: False
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_object_with_env_name_and_id(self, read):
+ self.validate_get_request(inventory.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'id': inventory.ID
+ },
+ mocks={
+ read: inventory.OBJECTS
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=inventory.OBJECTS[0]
+ )
+
+ @patch(base.RESPONDER_BASE_READ)
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ def test_get_nonexistent_object_with_env_name_and_id(self, check_env_name, read):
+ self.validate_get_request(inventory.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'id': inventory.NONEXISTENT_ID
+ },
+ mocks={
+ read: [],
+ check_env_name: True
+ },
+ expected_code=base.NOT_FOUND_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+    def test_get_object_with_unknown_env_name_and_id(self, check_env_name, read):
+ self.validate_get_request(inventory.URL,
+ params={
+ 'env_name': base.UNKNOWN_ENV,
+ 'id': inventory.ID
+ },
+ mocks={
+ read: [],
+ check_env_name: False
+ },
+ expected_code=base.BAD_REQUEST_CODE)
diff --git a/app/test/api/responders_test/resource/test_links.py b/app/test/api/responders_test/resource/test_links.py
new file mode 100644
index 0000000..b312aa1
--- /dev/null
+++ b/app/test/api/responders_test/resource/test_links.py
@@ -0,0 +1,193 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.responders_test.test_data import base
+from test.api.responders_test.test_data import links
+from test.api.test_base import TestBase
+from unittest.mock import patch
+
+
+class TestLinks(TestBase):
+
+ def test_get_links_list_without_env_name(self):
+ self.validate_get_request(links.URL,
+ params={},
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_links_list_with_invalid_filters(self):
+ self.validate_get_request(links.URL,
+ params={
+ 'invalid': 'invalid'
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_links_list_with_wrong_link_type(self):
+ self.validate_get_request(links.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'link_type': links.WRONG_TYPE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_links_list_with_correct_link_type(self, read):
+ self.validate_get_request(links.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'link_type': links.CORRECT_TYPE
+ },
+ mocks={
+ read: links.LINKS_WITH_SPECIFIC_TYPE
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=links.
+ LINKS_WITH_SPECIFIC_TYPE_RESPONSE
+ )
+
+ def test_get_links_list_with_wrong_state(self):
+ self.validate_get_request(links.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'state': links.WRONG_STATE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_links_list_with_correct_state(self, read):
+ self.validate_get_request(links.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'state': links.CORRECT_STATE
+ },
+ mocks={
+ read: links.LINKS_WITH_SPECIFIC_STATE,
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=links.
+ LINKS_WITH_SPECIFIC_STATE_RESPONSE
+ )
+
+ def test_get_link_with_env_name_and_wrong_link_id(self):
+ self.validate_get_request(links.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'id': links.WRONG_LINK_ID
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_link_with_env_name_and_link_id(self, read):
+ self.validate_get_request(links.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'id': links.LINK_ID
+ },
+ mocks={
+ read: links.LINKS_WITH_SPECIFIC_ID
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=links.
+ LINKS_WITH_SPECIFIC_ID[0]
+ )
+
+ def test_get_links_list_with_non_int_page(self):
+ self.validate_get_request(links.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'page': base.NON_INT_PAGE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_links_list_with_int_page(self, read):
+ self.validate_get_request(links.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'page': base.INT_PAGE
+ },
+ mocks={
+ read: links.LINKS
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=links.LINKS_LIST_RESPONSE)
+
+    def test_get_links_list_with_non_int_page_size(self):
+ self.validate_get_request(links.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'page_size': base.NON_INT_PAGESIZE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_links_list_with_int_page_size(self, read):
+ self.validate_get_request(links.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'page_size': base.INT_PAGESIZE
+ },
+ mocks={
+ read: links.LINKS
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=links.LINKS_LIST_RESPONSE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_links_list_with_env_name_and_unknown_host(self, read, check_env_name):
+ self.validate_get_request(links.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'host': links.UNKNOWN_HOST
+ },
+ mocks={
+ read: [],
+ check_env_name: True
+ },
+ expected_code=base.NOT_FOUND_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_links_list_with_unknown_env_name(self, read, check_env_name):
+ self.validate_get_request(links.URL,
+ params={
+ 'env_name': base.UNKNOWN_ENV
+ },
+ mocks={
+ read: [],
+ check_env_name: False
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_link_with_env_name_and_nonexistent_link_id(self, read, check_env_name):
+ self.validate_get_request(links.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'id': links.NONEXISTENT_LINK_ID
+ },
+ mocks={
+ read: [],
+ check_env_name: True
+ },
+ expected_code=base.NOT_FOUND_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_link_with_unknown_env_name(self, read, check_env_name):
+ self.validate_get_request(links.URL,
+ params={
+ 'env_name': base.UNKNOWN_ENV
+ },
+ mocks={
+ read: [],
+ check_env_name: False
+ },
+ expected_code=base.BAD_REQUEST_CODE)
diff --git a/app/test/api/responders_test/resource/test_messages.py b/app/test/api/responders_test/resource/test_messages.py
new file mode 100644
index 0000000..6999cee
--- /dev/null
+++ b/app/test/api/responders_test/resource/test_messages.py
@@ -0,0 +1,236 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.test_base import TestBase
+from test.api.responders_test.test_data import base
+from test.api.responders_test.test_data import messages
+from unittest.mock import patch
+
+
+class TestMessages(TestBase):
+
+ def test_get_messages_list_without_env_name(self):
+ self.validate_get_request(messages.URL,
+ params={},
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_messages_list_with_invalid_filter(self):
+ self.validate_get_request(messages.URL,
+ params={
+ 'invalid': 'invalid'
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_messages_list_with_wrong_format_start_time(self):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'start_time': messages.WRONG_FORMAT_TIME
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_messages_list_with_correct_format_start_time(self, read):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'start_time': messages.CORRECT_FORMAT_TIME
+ },
+ mocks={
+ read: messages.MESSAGES_WITH_SPECIFIC_TIME
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=
+ messages.MESSAGES_WITH_SPECIFIC_TIME_RESPONSE
+ )
+
+ def test_get_messages_list_with_wrong_format_end_time(self):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'end_time': messages.WRONG_FORMAT_TIME
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_messages_list_with_correct_format_end_time(self, read):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'end_time': messages.CORRECT_FORMAT_TIME
+ },
+ mocks={
+ read: messages.MESSAGES_WITH_SPECIFIC_TIME
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=
+ messages.MESSAGES_WITH_SPECIFIC_TIME_RESPONSE
+ )
+
+ def test_get_messages_list_with_wrong_level(self):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'level': messages.WRONG_SEVERITY
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_messages_list_with_level(self, read):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'level': messages.CORRECT_SEVERITY
+ },
+ mocks={
+ read: messages.MESSAGES_WITH_SPECIFIC_SEVERITY
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=messages.
+ MESSAGES_WITH_SPECIFIC_SEVERITY_RESPONSE
+ )
+
+ def test_get_messages_list_with_wrong_related_object_type(self):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'related_object_type':
+ messages.WRONG_RELATED_OBJECT_TYPE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_messages_list_with_correct_related_object_type(self, read):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'related_object_type':
+ messages.CORRECT_RELATED_OBJECT_TYPE
+ },
+ mocks={
+ read: messages.
+ MESSAGES_WITH_SPECIFIC_RELATED_OBJECT_TYPE
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=messages.
+ MESSAGES_WITH_SPECIFIC_RELATED_OBJECT_TYPE_RESPONSE
+ )
+
+ def test_get_messages_list_with_non_int_page(self):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'page': base.NON_INT_PAGE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_messages_list_with_int_page(self, read):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'page': base.INT_PAGE
+ },
+ mocks={
+ read: messages.MESSAGES
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=messages.MESSAGES_RESPONSE)
+
+ def test_get_messages_list_with_non_int_page_size(self):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'page_size': base.NON_INT_PAGESIZE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_messages_list_with_int_pagesize(self, read):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'page_size': base.INT_PAGESIZE
+ },
+ mocks={
+ read: messages.MESSAGES
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=messages.MESSAGES_RESPONSE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_messages_list_with_env_name_and_nonexistent_related_object(self, read, check_env_name):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'related_object': messages.NONEXISTENT_RELATED_OBJECT
+ },
+ mocks={
+ read: [],
+ check_env_name: True
+ },
+ expected_code=base.NOT_FOUND_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_messages_list_with_unknown_env_name(self, read, check_env_name):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.UNKNOWN_ENV,
+ 'related_object': messages.RELATED_OBJECT
+ },
+ mocks={
+ read: [],
+ check_env_name: False
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_message_with_env_name_and_nonexistent_id(self, read, check_env_name):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'id': messages.NONEXISTENT_MESSAGE_ID
+ },
+ mocks={
+ read: [],
+ check_env_name: True
+ },
+ expected_code=base.NOT_FOUND_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_message_with_unknown_env_name_and_id(self, read, check_env_name):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.UNKNOWN_ENV,
+ 'id': messages.MESSAGE_ID
+ },
+ mocks={
+ read: [],
+ check_env_name: False
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_message_with_env_name_and_id(self, read, check_env_name):
+ self.validate_get_request(messages.URL,
+ params={
+ 'env_name': base.ENV_NAME,
+ 'id': messages.MESSAGE_ID
+ },
+ mocks={
+ read: messages.MESSAGES_WITH_SPECIFIC_ID,
+                                      check_env_name: True
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=messages.MESSAGES_WITH_SPECIFIC_ID[0])
diff --git a/app/test/api/responders_test/resource/test_monitoring_config_templates.py b/app/test/api/responders_test/resource/test_monitoring_config_templates.py
new file mode 100644
index 0000000..04f413e
--- /dev/null
+++ b/app/test/api/responders_test/resource/test_monitoring_config_templates.py
@@ -0,0 +1,156 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.test_base import TestBase
+from test.api.responders_test.test_data import base
+from test.api.responders_test.test_data import monitoring_config_templates
+from unittest.mock import patch
+
+
+class TestMonitoringConfigTemplates(TestBase):
+
+ def test_get_templates_list_with_unknown_filter(self):
+ self.validate_get_request(monitoring_config_templates.URL,
+ params={
+ "unknown": "unknown"
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_templates_list_with_non_int_order(self):
+ self.validate_get_request(monitoring_config_templates.URL,
+ params={
+ "order": monitoring_config_templates.NON_INT_ORDER
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_templates_list_with_order(self, read):
+ self.validate_get_request(monitoring_config_templates.URL,
+ params={
+ "order": monitoring_config_templates.INT_ORDER
+ },
+ mocks={
+ read: monitoring_config_templates.
+ TEMPLATES_WITH_SPECIFIC_ORDER
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=monitoring_config_templates.
+ TEMPLATES_WITH_SPECIFIC_ORDER_RESPONSE
+ )
+
+ def test_get_templates_list_with_wrong_side(self):
+ self.validate_get_request(monitoring_config_templates.URL,
+ params={
+ "side": monitoring_config_templates.WRONG_SIDE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_templates_list_with_side(self, read):
+ self.validate_get_request(monitoring_config_templates.URL,
+ params={
+ "side": monitoring_config_templates.CORRECT_SIDE
+ },
+ mocks={
+ read: monitoring_config_templates.
+ TEMPLATES_WITH_SPECIFIC_SIDE
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=monitoring_config_templates.
+ TEMPLATES_WITH_SPECIFIC_SIDE_RESPONSE
+ )
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_templates_list_with_type(self, read):
+ self.validate_get_request(monitoring_config_templates.URL,
+ params={
+ "type": monitoring_config_templates.TYPE
+ },
+ mocks={
+ read: monitoring_config_templates.
+ TEMPLATES_WITH_SPECIFIC_TYPE
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=monitoring_config_templates.
+ TEMPLATES_WITH_SPECIFIC_TYPE_RESPONSE
+ )
+
+ def test_get_templates_list_with_non_int_page(self):
+ self.validate_get_request(monitoring_config_templates.URL,
+ params={
+ "page": base.NON_INT_PAGE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_templates_list_with_int_page(self, read):
+ self.validate_get_request(monitoring_config_templates.URL,
+ params={
+ "page": base.INT_PAGE
+ },
+ mocks={
+ read: monitoring_config_templates.TEMPLATES
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=monitoring_config_templates.
+ TEMPLATES_RESPONSE
+ )
+
+ def test_get_templates_list_with_non_int_pagesize(self):
+ self.validate_get_request(monitoring_config_templates.URL,
+ params={
+ "page_size": base.NON_INT_PAGESIZE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_templates_list_with_int_pagesize(self, read):
+ self.validate_get_request(monitoring_config_templates.URL,
+ params={
+ "page_size": base.INT_PAGESIZE
+ },
+ mocks={
+ read: monitoring_config_templates.TEMPLATES
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=monitoring_config_templates.
+ TEMPLATES_RESPONSE
+ )
+
+ def test_get_template_with_wrong_id(self):
+ self.validate_get_request(monitoring_config_templates.URL,
+ params={
+ "id": monitoring_config_templates.WRONG_ID
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_template_with_unknown_id(self, read):
+ self.validate_get_request(monitoring_config_templates.URL,
+ params={
+ "id": monitoring_config_templates.UNKNOWN_ID
+ },
+ mocks={
+ read: []
+ },
+ expected_code=base.NOT_FOUND_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_template_with_id(self, read):
+ self.validate_get_request(monitoring_config_templates.URL,
+ params={
+ "id": monitoring_config_templates.CORRECT_ID
+ },
+ mocks={
+ read: monitoring_config_templates.TEMPLATES_WITH_SPECIFIC_ID
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=monitoring_config_templates.
+ TEMPLATES_WITH_SPECIFIC_ID[0]
+ )
diff --git a/app/test/api/responders_test/resource/test_scans.py b/app/test/api/responders_test/resource/test_scans.py
new file mode 100644
index 0000000..708cd54
--- /dev/null
+++ b/app/test/api/responders_test/resource/test_scans.py
@@ -0,0 +1,239 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from test.api.responders_test.test_data import base
+from test.api.test_base import TestBase
+from test.api.responders_test.test_data import scans
+from unittest.mock import patch
+
+
+class TestScans(TestBase):
+
+ def test_get_scans_list_without_env_name(self):
+ self.validate_get_request(scans.URL,
+ params={},
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_scans_list_with_invalid_filter(self):
+ self.validate_get_request(scans.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "invalid": "invalid"
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_scans_list_with_non_int_page(self):
+ self.validate_get_request(scans.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "page": base.NON_INT_PAGE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_scans_list_with_int_page(self, read):
+ self.validate_get_request(scans.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "page": base.INT_PAGE
+ },
+ mocks={
+ read: scans.SCANS
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=scans.SCANS_RESPONSE)
+
+ def test_get_scans_list_with_non_int_pagesize(self):
+ self.validate_get_request(scans.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "page_size": base.NON_INT_PAGESIZE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_scans_list_with_int_pagesize(self, read):
+ self.validate_get_request(scans.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "page_size": base.INT_PAGESIZE
+ },
+ mocks={
+ read: scans.SCANS
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=scans.SCANS_RESPONSE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_scans_list_with_unknown_env(self, read, check_env_name):
+ self.validate_get_request(scans.URL,
+ params={
+ "env_name": base.UNKNOWN_ENV
+ },
+ mocks={
+ read: [],
+ check_env_name: False
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_scans_list_with_base_object(self, read):
+ self.validate_get_request(scans.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "base_object": scans.BASE_OBJECT
+ },
+ mocks={
+ read: scans.SCANS_WITH_SPECIFIC_BASE_OBJ
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=scans.
+ SCANS_WITH_SPECIFIC_BASE_OBJ_RESPONSE
+ )
+
+ def test_get_scans_list_with_wrong_status(self):
+ self.validate_get_request(scans.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "status": scans.WRONG_STATUS
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_scans_list_with_status(self, read):
+ self.validate_get_request(scans.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "status": scans.CORRECT_STATUS
+ },
+ mocks={
+ read: scans.SCANS_WITH_SPECIFIC_STATUS,
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=scans.
+ SCANS_WITH_SPECIFIC_STATUS_RESPONSE
+ )
+
+ def test_get_scan_with_wrong_id(self):
+ self.validate_get_request(scans.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "id": scans.WRONG_ID
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_scan_with_nonexistent_id(self, read, check_env_name):
+ self.validate_get_request(scans.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "id": scans.NONEXISTENT_ID
+ },
+ mocks={
+ read: [],
+ check_env_name: True
+ },
+ expected_code=base.NOT_FOUND_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_scan_with_unknown_env_and_nonexistent_id(self, read, check_env_name):
+ self.validate_get_request(scans.URL,
+ params={
+ "env_name": base.UNKNOWN_ENV,
+ "id": scans.NONEXISTENT_ID
+ },
+ mocks={
+ read: [],
+ check_env_name: False
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_scan_with_id(self, read):
+ self.validate_get_request(scans.URL,
+ params={
+ "env_name": base.ENV_NAME,
+ "id": scans.CORRECT_ID
+ },
+ mocks={
+ read: scans.SCANS_WITH_SPECIFIC_ID
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=scans.SCANS_WITH_SPECIFIC_ID[0]
+ )
+
+ def test_post_scan_with_non_dict_scan(self):
+ self.validate_post_request(scans.URL,
+ body=json.dumps(scans.NON_DICT_SCAN),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_scan_without_env_name(self):
+ self.validate_post_request(scans.URL,
+ body=json.dumps(scans.SCAN_WITHOUT_ENV),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ def test_post_scan_with_unknown_env_name(self, check_environment_name):
+ self.validate_post_request(scans.URL,
+ mocks={
+ check_environment_name: False
+ },
+ body=json.dumps(scans.SCAN_WITH_UNKNOWN_ENV),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_scan_without_status(self):
+ self.validate_post_request(scans.URL,
+ body=json.dumps(scans.SCAN_WITHOUT_STATUS),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_scan_with_wrong_status(self):
+ self.validate_post_request(scans.URL,
+ body=json.dumps(scans.SCAN_WITH_WRONG_STATUS),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_scan_with_wrong_log_level(self):
+ self.validate_post_request(scans.URL,
+ body=json.dumps(scans.SCAN_WITH_WRONG_LOG_LEVEL),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_scan_with_non_bool_clear(self):
+ self.validate_post_request(scans.URL,
+ body=json.dumps(scans.SCAN_WITH_NON_BOOL_CLEAR),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_scan_with_non_bool_scan_only_inventory(self):
+ self.validate_post_request(scans.URL,
+ body=json.dumps(scans.SCAN_WITH_NON_BOOL_SCAN_ONLY_INVENTORY),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_scan_with_non_bool_scan_only_links(self):
+ self.validate_post_request(scans.URL,
+ body=json.dumps(scans.SCAN_WITH_NON_BOOL_SCAN_ONLY_LINKS),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_scan_with_non_bool_scan_only_cliques(self):
+ self.validate_post_request(scans.URL,
+ body=json.dumps(scans.SCAN_WITH_NON_BOOL_SCAN_ONLY_CLIQUES),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_WRITE)
+ def test_post_scan(self, write, check_env_name):
+ self.validate_post_request(scans.URL,
+ mocks={
+ check_env_name: True,
+ write: None
+ },
+ body=json.dumps(scans.SCAN),
+ expected_code=base.CREATED_CODE)
diff --git a/app/test/api/responders_test/resource/test_scheduled_scans.py b/app/test/api/responders_test/resource/test_scheduled_scans.py
new file mode 100644
index 0000000..23c38de
--- /dev/null
+++ b/app/test/api/responders_test/resource/test_scheduled_scans.py
@@ -0,0 +1,247 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from test.api.responders_test.test_data import base
+from test.api.test_base import TestBase
+from test.api.responders_test.test_data import scheduled_scans
+from unittest.mock import patch
+
+
+class TestScheduledScans(TestBase):
+ def test_get_scheduled_scans_list_without_env_name(self):
+ self.validate_get_request(scheduled_scans.URL,
+ params={},
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_scheduled_scans_list_with_invalid_filter(self):
+ self.validate_get_request(scheduled_scans.URL,
+ params={
+ "environment": base.ENV_NAME,
+ "invalid": "invalid"
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_scheduled_scans_list_with_non_int_page(self):
+ self.validate_get_request(scheduled_scans.URL,
+ params={
+ "environment": base.ENV_NAME,
+ "page": base.NON_INT_PAGE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_scheduled_scans_list_with_int_page(self, read):
+ self.validate_get_request(scheduled_scans.URL,
+ params={
+ "environment": base.ENV_NAME,
+ "page": base.INT_PAGE
+ },
+ mocks={
+ read: scheduled_scans.SCHEDULED_SCANS
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=scheduled_scans.
+ SCHEDULED_SCANS_RESPONSE
+ )
+
+ def test_get_scheduled_scans_list_with_non_int_pagesize(self):
+ self.validate_get_request(scheduled_scans.URL,
+ params={
+ "environment": base.ENV_NAME,
+ "page_size": base.NON_INT_PAGESIZE
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_scheduled_scans_list_with_int_pagesize(self, read):
+ self.validate_get_request(scheduled_scans.URL,
+ params={
+ "environment": base.ENV_NAME,
+ "page_size": base.INT_PAGESIZE
+ },
+ mocks={
+ read: scheduled_scans.SCHEDULED_SCANS
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=scheduled_scans.
+ SCHEDULED_SCANS_RESPONSE
+ )
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_scheduled_scans_list_with_unknown_env(self, read, check_env_name):
+ self.validate_get_request(scheduled_scans.URL,
+ params={
+ "environment": base.UNKNOWN_ENV
+ },
+ mocks={
+ read: [],
+ check_env_name: False
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_get_scheduled_scans_list_with_wrong_freq(self):
+ self.validate_get_request(scheduled_scans.URL,
+ params={
+ "environment": base.ENV_NAME,
+ "freq": scheduled_scans.WRONG_FREQ
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_scheduled_scans_list_with_freq(self, read):
+ self.validate_get_request(scheduled_scans.URL,
+ params={
+ "environment": base.ENV_NAME,
+ "freq": scheduled_scans.CORRECT_FREQ
+ },
+ mocks={
+ read: scheduled_scans.
+ SCHEDULED_SCAN_WITH_SPECIFIC_FREQ,
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=scheduled_scans.
+ SCHEDULED_SCAN_WITH_SPECIFIC_FREQ_RESPONSE
+ )
+
+ def test_get_scheduled_scan_with_wrong_id(self):
+ self.validate_get_request(scheduled_scans.URL,
+ params={
+ "environment": base.ENV_NAME,
+ "id": scheduled_scans.WRONG_ID
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+    def test_get_scheduled_scan_with_nonexistent_id(self, read, check_env_name):
+ self.validate_get_request(scheduled_scans.URL,
+ params={
+ "environment": base.ENV_NAME,
+ "id": scheduled_scans.NONEXISTENT_ID
+ },
+ mocks={
+ read: [],
+ check_env_name: True
+ },
+ expected_code=base.NOT_FOUND_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_scheduled_scan_with_unknown_env_and_nonexistent_id(self, read,
+ check_env_name):
+ self.validate_get_request(scheduled_scans.URL,
+ params={
+ "environment": base.UNKNOWN_ENV,
+ "id": scheduled_scans.NONEXISTENT_ID
+ },
+ mocks={
+ read: [],
+ check_env_name: False
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_scheduled_scan_with_id(self, read):
+ self.validate_get_request(scheduled_scans.URL,
+ params={
+ "environment": base.ENV_NAME,
+ "id": scheduled_scans.CORRECT_ID
+ },
+ mocks={
+ read: scheduled_scans.
+ SCHEDULED_SCAN_WITH_SPECIFIC_ID
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=scheduled_scans.
+ SCHEDULED_SCAN_WITH_SPECIFIC_ID[0]
+ )
+
+ def test_post_scheduled_scan_with_non_dict_scheduled_scan(self):
+ self.validate_post_request(scheduled_scans.URL,
+ body=json.dumps(scheduled_scans.
+ NON_DICT_SCHEDULED_SCAN),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_bad_scheduled_scans(self):
+ test_cases = [
+ {
+ "body": scheduled_scans.
+ SCHEDULED_SCAN_WITHOUT_ENV
+ },
+ {
+ "body": scheduled_scans.
+ SCHEDULED_SCAN_WITHOUT_FREQ
+ },
+ {
+ "body": scheduled_scans.
+ SCHEDULED_SCAN_WITH_WRONG_FREQ
+ },
+ {
+ "body": scheduled_scans.
+ SCHEDULED_SCAN_WITH_WRONG_LOG_LEVEL
+ },
+ {
+ "body": scheduled_scans.
+ SCHEDULED_SCAN_WITHOUT_SUBMIT_TIMESTAMP
+ },
+ {
+ "body": scheduled_scans.
+ SCHEDULED_SCAN_WITH_WRONG_SUBMIT_TIMESTAMP
+ },
+ {
+ "body": scheduled_scans.
+ SCHEDULED_SCAN_WITH_NON_BOOL_CLEAR
+ },
+ {
+ "body": scheduled_scans.
+ SCHEDULED_SCAN_WITH_NON_BOOL_SCAN_ONLY_LINKS
+ },
+ {
+ "body": scheduled_scans.
+ SCHEDULED_SCAN_WITH_NON_BOOL_SCAN_ONLY_CLIQUES
+ },
+ {
+ "body": scheduled_scans.
+ SCHEDULED_SCAN_WITH_NON_BOOL_SCAN_ONLY_INVENTORY
+ },
+ {
+ "body": scheduled_scans.
+ SCHEDULED_SCAN_WITH_EXTRA_SCAN_ONLY_FLAGS
+ }
+ ]
+ for test_case in test_cases:
+ self.validate_post_request(scheduled_scans.URL,
+ body=json.dumps(test_case["body"]),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ def test_post_scheduled_scan_with_unknown_env_name(self,
+ check_environment_name):
+ self.validate_post_request(scheduled_scans.URL,
+ mocks={
+ check_environment_name: False
+ },
+ body=json.dumps(scheduled_scans.
+ SCHEDULED_SCAN_WITH_UNKNOWN_ENV),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ @patch(base.RESPONDER_BASE_WRITE)
+ def test_post_scheduled_scan(self, write, check_env_name):
+ self.validate_post_request(scheduled_scans.URL,
+ mocks={
+ check_env_name: True,
+ write: None
+ },
+ body=json.dumps(scheduled_scans.
+ SCHEDULED_SCAN),
+ expected_code=base.CREATED_CODE)
diff --git a/app/test/api/responders_test/test_data/__init__.py b/app/test/api/responders_test/test_data/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/test/api/responders_test/test_data/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/test/api/responders_test/test_data/aggregates.py b/app/test/api/responders_test/test_data/aggregates.py
new file mode 100644
index 0000000..52ce985
--- /dev/null
+++ b/app/test/api/responders_test/test_data/aggregates.py
@@ -0,0 +1,67 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+URL = "/aggregates"
+
+CONSTANT_TYPE = "constant"
+ENV_TYPE = "environment"
+MESSAGE_TYPE = "message"
+UNKNOWN_TYPE = "unknown"
+
+CONSTANT_AGGREGATES = [
+ {"name": "type_drivers", "total": 5},
+ {"name": "environment_monitoring_types", "total": 1},
+ {"name": "link_states", "total": 2}
+]
+ENVIRONMENT_AGGREGATES = [
+ {'_id': 'otep', 'total': 3},
+ {'_id': 'instance', 'total': 2},
+ {'_id': 'network_agent', 'total': 6}
+]
+MESSAGE_ENV_AGGREGATES = [
+ {'_id': 'Mirantis-Liberty-API', 'total': 15}
+]
+MESSAGE_LEVEL_AGGREGATES = [
+ {'_id': 'info', 'total': 15}
+]
+
+CONSTANT_AGGREGATES_RESPONSE = {
+ "type": "constant",
+ "aggregates": {
+ "names": {
+ "type_drivers": 5,
+ "environment_monitoring_types": 1,
+ "link_states": 2
+ }
+ }
+}
+
+ENVIRONMENT_AGGREGATES_RESPONSE = {
+ "aggregates": {
+ "object_types": {
+ "otep": 3,
+ "instance": 2,
+ "network_agent": 6
+ }
+ },
+ "env_name": "Mirantis-Liberty-API",
+ "type": "environment"
+}
+
+MESSAGE_AGGREGATES_RESPONSE = {
+ "aggregates": {
+ "environments": {
+ "Mirantis-Liberty-API": 15
+ },
+ "levels": {
+ "info": 15
+ }
+ },
+ "type": "message"
+}
diff --git a/app/test/api/responders_test/test_data/base.py b/app/test/api/responders_test/test_data/base.py
new file mode 100644
index 0000000..1e85800
--- /dev/null
+++ b/app/test/api/responders_test/test_data/base.py
@@ -0,0 +1,179 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# HTTP status codes
+SUCCESSFUL_CODE = "200"
+NOT_FOUND_CODE = "404"
+CONFLICT_CODE = "409"
+BAD_REQUEST_CODE = "400"
+UNAUTHORIZED_CODE = "401"
+CREATED_CODE = "201"
+
+ENV_NAME = "Mirantis-Liberty-API"
+UNKNOWN_ENV = "Unknown-Environment"
+NON_INT_PAGE = 1.4
+INT_PAGE = 1
+NON_INT_PAGESIZE = 2.4
+INT_PAGESIZE = 2
+
+WRONG_LINK_TYPE = "instance-host"
+CORRECT_LINK_TYPE = "instance-vnic"
+
+WRONG_LINK_STATE = "wrong"
+CORRECT_LINK_STATE = "up"
+
+WRONG_SCAN_STATUS = "error"
+CORRECT_SCAN_STATUS = "completed"
+
+WRONG_MONITORING_SIDE = "wrong-side"
+CORRECT_MONITORING_SIDE = "client"
+
+WRONG_MESSAGE_SEVERITY = "wrong-severity"
+CORRECT_MESSAGE_SEVERITY = "warn"
+
+WRONG_TYPE_DRIVER = "wrong_type"
+CORRECT_TYPE_DRIVER = "local"
+
+WRONG_MECHANISM_DRIVER = "wrong-mechanism-driver"
+CORRECT_MECHANISM_DRIVER = "ovs"
+
+WRONG_LOG_LEVEL = "wrong-log-level"
+CORRECT_LOG_LEVEL = "critical"
+
+WRONG_OBJECT_TYPE = "wrong-object-type"
+CORRECT_OBJECT_TYPE = "vnic"
+
+WRONG_ENV_TYPE = ""
+CORRECT_ENV_TYPE = "development"
+
+WRONG_DISTRIBUTION = "wrong-environment"
+CORRECT_DISTRIBUTION = "Mirantis-6.0"
+
+WRONG_OBJECT_ID = "58a2406e6a283a8bee15d43"
+CORRECT_OBJECT_ID = "58a2406e6a283a8bee15d43f"
+
+WRONG_FORMAT_TIME = "2017-01-25T23:34:333+TX0012"
+CORRECT_FORMAT_TIME = "2017-01-25T14:28:32.400Z"
+
+NON_BOOL = "falses"
+BOOL = False
+NON_DICT_OBJ = ""
+
+# Fake constants data, keyed by constant name (stands in for DB values)
+CONSTANTS_BY_NAMES = {
+ "link_types": [
+ "instance-vnic",
+ "otep-vconnector",
+ "otep-pnic",
+ "pnic-network",
+ "vedge-otep",
+ "vnic-vconnector",
+ "vconnector-pnic",
+ "vconnector-vedge",
+ "vnic-vedge",
+ "vedge-pnic",
+ "vservice-vnic"
+ ],
+ "link_states": [
+ "up",
+ "down"
+ ],
+ "scan_statuses": [
+ "draft",
+ "pending",
+ "running",
+ "completed",
+ "failed",
+ "aborted"
+ ],
+ "monitoring_sides": [
+ "client",
+ "server"
+ ],
+ "messages_severity": [
+ "panic",
+ "alert",
+ "crit",
+ "error",
+ "warn",
+ "notice",
+ "info",
+ "debug"
+ ],
+ "type_drivers": [
+ "local",
+ "vlan",
+ "vxlan",
+ "gre",
+ "flat"
+ ],
+ "mechanism_drivers": [
+ "ovs",
+ "vpp",
+ "LinuxBridge",
+ "Arista",
+ "Nexus"
+ ],
+ "log_levels": [
+ "critical",
+ "error",
+ "warning",
+ "info",
+ "debug",
+ "notset"
+ ],
+ "object_types": [
+ "vnic",
+ "vconnector",
+ "vedge",
+ "instance",
+ "vservice",
+ "pnic",
+ "network",
+ "port",
+ "otep",
+ "agent"
+ ],
+ "env_types": [
+ "development",
+ "testing",
+ "staging",
+ "production"
+ ],
+ "distributions": [
+ "Mirantis-6.0",
+ "Mirantis-7.0",
+ "Mirantis-8.0",
+ "Mirantis-9.0",
+ "RDO-Juno"
+ ],
+ "environment_operational_status": [
+ "stopped",
+ "running",
+ "error"
+ ],
+ "environment_provision_types": [
+ "None",
+ "Deploy",
+ "Files",
+ "DB"
+ ],
+ "environment_monitoring_types": [
+ "Sensu"
+ ]
+}
+
+# Patch targets: dotted paths of ResponderBase methods mocked in these tests
+RESPONDER_BASE_PATH = "api.responders.responder_base.ResponderBase"
+RESPONDER_BASE_GET_OBJECTS_LIST = RESPONDER_BASE_PATH + ".get_objects_list"
+RESPONDER_BASE_GET_OBJECT_BY_ID = RESPONDER_BASE_PATH + ".get_object_by_id"
+RESPONDER_BASE_CHECK_ENVIRONMENT_NAME = RESPONDER_BASE_PATH + ".check_environment_name"
+RESPONDER_BASE_READ = RESPONDER_BASE_PATH + ".read"
+RESPONDER_BASE_WRITE = RESPONDER_BASE_PATH + ".write"
+RESPONDER_BASE_AGGREGATE = RESPONDER_BASE_PATH + ".aggregate"
diff --git a/app/test/api/responders_test/test_data/clique_constraints.py b/app/test/api/responders_test/test_data/clique_constraints.py
new file mode 100644
index 0000000..6f867ae
--- /dev/null
+++ b/app/test/api/responders_test/test_data/clique_constraints.py
@@ -0,0 +1,74 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.responders_test.test_data import base
+
+URL = "/clique_constraints"
+
+WRONG_ID = base.WRONG_OBJECT_ID
+NONEXISTENT_ID = "576a4176a83d5313f21971f0"
+CORRECT_ID = base.CORRECT_OBJECT_ID
+
+WRONG_FOCAL_POINT_TYPE = base.WRONG_OBJECT_TYPE
+CORRECT_FOCAL_POINT_TYPE = base.CORRECT_OBJECT_TYPE
+
+CONSTRAINT = "network"
+
+CLIQUE_CONSTRAINTS_WITH_SPECIFIC_ID = [
+ {
+ "id": CORRECT_ID
+ }
+]
+
+CLIQUE_CONSTRAINTS_WITH_SPECIFIC_FOCAL_POINT_TYPE = [
+ {
+ "id": "576a4176a83d5313f21971f5",
+ "focal_point_type": CORRECT_FOCAL_POINT_TYPE
+ },
+ {
+ "id": "576ac7069f6ba3074882b2eb",
+ "focal_point_type": CORRECT_FOCAL_POINT_TYPE
+ }
+]
+
+CLIQUE_CONSTRAINTS_WITH_SPECIFIC_FOCAL_POINT_TYPE_RESPONSE = {
+ "clique_constraints": CLIQUE_CONSTRAINTS_WITH_SPECIFIC_FOCAL_POINT_TYPE
+}
+
+CLIQUE_CONSTRAINTS_WITH_SPECIFIC_CONSTRAINT = [
+ {
+ "id": "576a4176a83d5313f21971f5",
+ "constraints": [
+ CONSTRAINT
+ ]
+ },
+ {
+ "id": "576ac7069f6ba3074882b2eb",
+ "constraints": [
+ CONSTRAINT
+ ]
+ }
+]
+
+CLIQUE_CONSTRAINTS_WITH_SPECIFIC_CONSTRAINT_RESPONSE = {
+ "clique_constraints": CLIQUE_CONSTRAINTS_WITH_SPECIFIC_CONSTRAINT
+}
+
+CLIQUE_CONSTRAINTS = [
+ {
+ "id": "576a4176a83d5313f21971f5"
+ },
+ {
+ "id": "576ac7069f6ba3074882b2eb"
+ }
+]
+
+CLIQUE_CONSTRAINTS_RESPONSE = {
+ "clique_constraints": CLIQUE_CONSTRAINTS
+}
diff --git a/app/test/api/responders_test/test_data/clique_types.py b/app/test/api/responders_test/test_data/clique_types.py
new file mode 100644
index 0000000..0fbe839
--- /dev/null
+++ b/app/test/api/responders_test/test_data/clique_types.py
@@ -0,0 +1,170 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.responders_test.test_data import base
+
+
+URL = "/clique_types"
+
+WRONG_ID = base.WRONG_OBJECT_ID
+NONEXISTENT_ID = "58ca73ae3a8a836d10ff3b44"
+CORRECT_ID = base.CORRECT_OBJECT_ID
+
+WRONG_FOCAL_POINT_TYPE = base.WRONG_OBJECT_TYPE
+CORRECT_FOCAL_POINT_POINT_TYPE = base.CORRECT_OBJECT_TYPE
+
+WRONG_LINK_TYPE = base.WRONG_LINK_TYPE
+NONEXISTENT_LINK_TYPE = "otep-pnic"
+CORRECT_LINK_TYPE = base.CORRECT_LINK_TYPE
+
+CLIQUE_TYPES_WITH_SPECIFIC_ID = [
+ {
+ "environment": "Mirantis-Liberty-API",
+ "focal_point_type": "pnic",
+ "id": CORRECT_ID
+ }
+]
+
+CLIQUE_TYPES_WITH_SPECIFIC_FOCAL_POINT_TYPE = [
+ {
+ "environment": "Mirantis-Liberty-API",
+ "focal_point_type": CORRECT_FOCAL_POINT_POINT_TYPE,
+ "id": "58ca73ae3a8a836d10ff3b80"
+ },
+ {
+ "environment": "Mirantis-Liberty-API",
+ "focal_point_type": CORRECT_FOCAL_POINT_POINT_TYPE,
+ "id": "58ca73ae3a8a836d10ff3b81"
+ }
+]
+
+CLIQUE_TYPES_WITH_SPECIFIC_FOCAL_POINT_TYPE_RESPONSE = {
+ "clique_types": CLIQUE_TYPES_WITH_SPECIFIC_FOCAL_POINT_TYPE
+}
+
+CLIQUE_TYPES_WITH_SPECIFIC_LINK_TYPE = [
+ {
+ "environment": "Mirantis-Liberty-API",
+ "link_types": [
+ CORRECT_LINK_TYPE
+ ],
+ "id": "58ca73ae3a8a836d10ff3b80"
+ },
+ {
+ "environment": "Mirantis-Liberty-API",
+ "link_types": [
+ CORRECT_LINK_TYPE
+ ],
+ "id": "58ca73ae3a8a836d10ff3b81"
+ }
+]
+
+CLIQUE_TYPES_WITH_SPECIFIC_LINK_TYPE_RESPONSE = {
+ "clique_types": CLIQUE_TYPES_WITH_SPECIFIC_LINK_TYPE
+}
+
+CLIQUE_TYPES = [
+ {
+ "environment": "Mirantis-Liberty-API",
+ "focal_point_type": "vnic",
+ "id": "58ca73ae3a8a836d10ff3b80"
+ },
+ {
+ "environment": "Mirantis-Liberty-API",
+ "focal_point_type": "vnic",
+ "id": "58ca73ae3a8a836d10ff3b81"
+ }
+]
+
+CLIQUE_TYPES_RESPONSE = {
+ "clique_types": CLIQUE_TYPES
+}
+
+NON_DICT_CLIQUE_TYPE = base.NON_DICT_OBJ
+
+CLIQUE_TYPE_WITHOUT_ENVIRONMENT = {
+ "name": "instance_vconnector_clique",
+ "link_types": [
+ "instance-vnic",
+ "vnic-vconnector"
+ ],
+ "focal_point_type": "instance"
+}
+
+CLIQUE_TYPE_WITH_UNKNOWN_ENVIRONMENT = {
+ "environment": base.UNKNOWN_ENV,
+ "id": "589a3969761b0555a3ef6093",
+ "name": "instance_vconnector_clique",
+ "link_types": [
+ "instance-vnic",
+ "vnic-vconnector"
+ ],
+ "focal_point_type": "instance"
+}
+
+CLIQUE_TYPE_WITHOUT_FOCAL_POINT_TYPE = {
+ "environment": "Mirantis-Liberty-API",
+ "name": "instance_vconnector_clique",
+ "link_types": [
+ "instance-vnic",
+ "vnic-vconnector"
+ ]
+}
+
+CLIQUE_TYPE_WITH_WRONG_FOCAL_POINT_TYPE = {
+ "environment": "Mirantis-Liberty-API",
+ "name": "instance_vconnector_clique",
+ "link_types": [
+ "instance-vnic",
+ "vnic-vconnector"
+ ],
+ "focal_point_type": WRONG_FOCAL_POINT_TYPE
+}
+
+CLIQUE_TYPE_WITHOUT_LINK_TYPES = {
+ "environment": "Mirantis-Liberty-API",
+ "name": "instance_vconnector_clique",
+ "focal_point_type": "instance"
+}
+
+CLIQUE_TYPE_WITH_NON_LIST_LINK_TYPES = {
+ "environment": "Mirantis-Liberty-API",
+ "name": "instance_vconnector_clique",
+ "link_types": "instance-vnic",
+ "focal_point_type": "instance"
+}
+
+CLIQUE_TYPE_WITH_WRONG_LINK_TYPE = {
+ "environment": "Mirantis-Liberty-API",
+ "name": "instance_vconnector_clique",
+ "link_types": [
+ WRONG_LINK_TYPE,
+ "vnic-vconnector"
+ ],
+ "focal_point_type": "instance"
+}
+
+CLIQUE_TYPE_WITHOUT_NAME = {
+ "environment": "Mirantis-Liberty-API",
+ "link_types": [
+ "instance-vnic",
+ "vnic-vconnector",
+ ],
+ "focal_point_type": "instance"
+}
+
+CLIQUE_TYPE = {
+ "environment": "Mirantis-Liberty-API",
+ "name": "instance_vconnector_clique",
+ "link_types": [
+ "instance-vnic",
+ "vnic-vconnector"
+ ],
+ "focal_point_type": "instance"
+}
diff --git a/app/test/api/responders_test/test_data/cliques.py b/app/test/api/responders_test/test_data/cliques.py
new file mode 100644
index 0000000..e1995cd
--- /dev/null
+++ b/app/test/api/responders_test/test_data/cliques.py
@@ -0,0 +1,171 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.responders_test.test_data import base
+
+URL = "/cliques"
+
+WRONG_CLIQUE_ID = "58a2406e6a283a8bee15d43"
+CORRECT_CLIQUE_ID = "58a2406e6a283a8bee15d43f"
+NONEXISTENT_CLIQUE_ID = "58a2406e6a283a8bee15d43e"
+
+WRONG_FOCAL_POINT = "58a2406e6a283a8bee15d43"
+CORRECT_FOCAL_POINT = "58a2406e6a283a8bee15d43f"
+
+WRONG_LINK_ID = "58a2406e6a283a8bee15d43"
+CORRECT_LINK_ID = "58a2406e6a283a8bee15d43f"
+NONEXISTENT_LINK_ID = "58a2406e6a283a8bee15d43e"
+
+WRONG_FOCAL_POINT_TYPE = base.WRONG_OBJECT_TYPE
+CORRECT_FOCAL_POINT_TYPE = base.CORRECT_OBJECT_TYPE
+
+WRONG_LINK_TYPE = base.WRONG_LINK_TYPE
+CORRECT_LINK_TYPE = base.CORRECT_LINK_TYPE
+
+CLIQUES_WITH_SPECIFIC_ID = [
+ {
+ "environment": "Mirantis-Liberty-API",
+ "focal_point_type": "vnic",
+ "id": CORRECT_CLIQUE_ID
+ }
+]
+
+CLIQUES_WITH_SPECIFIC_FOCAL_POINT_TYPE = [
+ {
+ "environment": "Mirantis-Liberty-API",
+ "focal_point_type": CORRECT_FOCAL_POINT_TYPE,
+ "id": "576c119a3f4173144c7a75c5"
+ },
+ {
+ "environment": "Mirantis-Liberty-API",
+ "focal_point_type": CORRECT_FOCAL_POINT_TYPE,
+ "id": "576c119a3f4173144c7a75cc6"
+ }
+]
+
+CLIQUES_WITH_SPECIFIC_FOCAL_POINT_TYPE_RESPONSE = {
+ "cliques": CLIQUES_WITH_SPECIFIC_FOCAL_POINT_TYPE
+}
+
+CLIQUES_WITH_SPECIFIC_FOCAL_POINT = [
+ {
+ "environment": "Mirantis-Liberty-API",
+ "focal_point": CORRECT_FOCAL_POINT,
+ "id": "576c119a3f4173144c7a75c5"
+ },
+ {
+ "environment": "Mirantis-Liberty-API",
+ "focal_point": CORRECT_FOCAL_POINT,
+ "id": "576c119a3f4173144c7a758e"
+ }
+]
+
+CLIQUES_WITH_SPECIFIC_FOCAL_POINT_RESPONSE = {
+ "cliques": CLIQUES_WITH_SPECIFIC_FOCAL_POINT
+}
+
+CLIQUES_WITH_SPECIFIC_LINK_TYPE = [
+ {
+ "links_detailed": [
+ {
+ "link_type": CORRECT_LINK_TYPE,
+ "_id": "58a2405a6a283a8bee15d42f"
+ },
+ {
+ "link_type": "vnic-vconnector",
+ "_id": "58a240056a283a8bee15d3f2"
+ }
+ ],
+ "environment": "Mirantis-Liberty-API",
+ "focal_point_type": "vnic",
+ "id": "576c119a3f4173144c7a75c5"
+ },
+ {
+ "links_detailed": [
+ {
+ "link_type": CORRECT_LINK_TYPE,
+ "_id": "58a2405a6a283a8bee15d42f"
+ }
+ ],
+ "environment": "Mirantis-Liberty-API",
+ "focal_point_type": "pnic",
+ "id": "576c119a3f4173144c7a75c7"
+ }
+]
+
+CLIQUES_WITH_SPECIFIC_LINK_TYPE_RESPONSE = {
+ "cliques": CLIQUES_WITH_SPECIFIC_LINK_TYPE
+}
+
+CLIQUES_WITH_SPECIFIC_LINK_ID = [
+ {
+ "links_detailed": [
+ {
+ "_id": CORRECT_LINK_ID
+ },
+ {
+ "_id": "58a240056a283a8bee15d3f2"
+ }
+ ],
+ "environment": "Mirantis-Liberty-API",
+ "focal_point_type": "vnic",
+ "id": "576c119a3f4173144c7a75c5"
+ },
+ {
+ "links_detailed": [
+ {
+ "_id": CORRECT_LINK_ID
+ }
+ ],
+ "environment": "Mirantis-Liberty-API",
+ "focal_point_type": "pnic",
+ "id": "576c119a3f4173144c7a75c7"
+ }
+]
+
+CLIQUES_WITH_SPECIFIC_LINK_ID_RESPONSE = {
+ "cliques": CLIQUES_WITH_SPECIFIC_LINK_ID
+}
+
+# Full cliques list and the corresponding API response
+CLIQUES = [{
+ "links_detailed": [
+ {
+ "link_type": "instance-vnic",
+ "_id": "58a2405a6a283a8bee15d42f"
+ },
+ {
+ "link_type": "vnic-vconnector",
+ "_id": "58a240056a283a8bee15d3f2"
+ }
+ ],
+ "environment": "Mirantis-Liberty-API",
+ "focal_point_type": "vnic",
+ "id": "576c119a3f4173144c7a75c5"
+ },
+ {
+ "links_detailed": [
+ {
+ "link_type": "instance-vnic",
+ "_id": "58a2405a6a283a8bee15d42f"
+ },
+ {
+ "link_type": "vnic-vconnector",
+ "_id": "58a240056a283a8bee15d3f2"
+ }
+ ],
+ "environment": "Miratis-Liberty-API",
+ "focal_point_type": "pnic",
+ "id": "576c119a3f4173144c7a75c6"
+ }
+]
+
+CLIQUES_RESPONSE = {
+ "cliques": CLIQUES
+}
diff --git a/app/test/api/responders_test/test_data/constants.py b/app/test/api/responders_test/test_data/constants.py
new file mode 100644
index 0000000..9293209
--- /dev/null
+++ b/app/test/api/responders_test/test_data/constants.py
@@ -0,0 +1,23 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+URL = "/constants"
+UNKNOWN_NAME = "unknown constant"
+NAME = "distributions"
+CONSTANTS_WITH_SPECIFIC_NAME = [{
+ "id": "YmPDAQAchr39KjECQ",
+ "name": NAME,
+ "data": [{
+ "value": "Canonical-icehouse",
+ "label": "Canonical-icehouse"
+ }, {
+ "value": "Canonical-juno",
+ "label": "Canonical-juno"
+ }],
+}]
diff --git a/app/test/api/responders_test/test_data/environment_configs.py b/app/test/api/responders_test/test_data/environment_configs.py
new file mode 100644
index 0000000..2a67fb6
--- /dev/null
+++ b/app/test/api/responders_test/test_data/environment_configs.py
@@ -0,0 +1,221 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.responders_test.test_data import base
+
+
+URL = "/environment_configs"
+
+NAME = "Mirantis-Liberty-API"
+UNKNOWN_NAME = "UNKNOWN NAME"
+WRONG_DISTRIBUTION = base.WRONG_DISTRIBUTION
+CORRECT_DISTRIBUTION = base.CORRECT_DISTRIBUTION
+WRONG_MECHANISM_DRIVER = base.WRONG_MECHANISM_DRIVER
+CORRECT_MECHANISM_DRIVER = base.CORRECT_MECHANISM_DRIVER
+WRONG_TYPE_DRIVER = base.WRONG_TYPE_DRIVER
+CORRECT_TYPE_DRIVER = base.CORRECT_TYPE_DRIVER
+USER = "WS7j8oTbWPf3LbNne"
+NON_BOOL_LISTEN = NON_BOOL_SCANNED = \
+ NON_BOOL_MONITORING_SETUP_DONE = base.NON_BOOL
+
+BOOL_LISTEN = BOOL_SCANNED = \
+ BOOL_MONITORING_SETUP_DONE = base.BOOL
+
+ENV_CONFIGS = [
+ {
+ "distribution": "Mirantis-8.0",
+ "name": "Mirantis-Liberty-API"
+ },
+ {
+ "distribution": "Mirantis-9.0",
+ "name": "Mirantis-Liberty"
+ }
+]
+
+ENV_CONFIGS_RESPONSE = {
+ "environment_configs": ENV_CONFIGS
+}
+
+ENV_CONFIGS_WITH_SPECIFIC_NAME = [
+ {
+ "distribution": "Mirantis-8.0",
+ "name": NAME
+ }
+]
+
+ENV_CONFIGS_WITH_SPECIFIC_DISTRIBUTION = [
+ {
+ "distribution": CORRECT_DISTRIBUTION,
+ "name": "Mirantis-Liberty-API",
+ },
+ {
+ "distribution": CORRECT_DISTRIBUTION,
+ "name": "Mirantis-Liberty"
+ }
+]
+
+ENV_CONFIGS_WITH_SPECIFIC_DISTRIBUTION_RESPONSE = {
+ "environment_configs": ENV_CONFIGS_WITH_SPECIFIC_DISTRIBUTION
+}
+
+ENV_CONFIGS_WITH_SPECIFIC_MECHANISM_DRIVER = [
+ {
+ "name": "Mirantis-Liberty-API",
+ "mechanism_drivers": [
+ CORRECT_MECHANISM_DRIVER
+ ]
+ },
+ {
+ "name": "Mirantis-Liberty",
+ "mechanism_drivers": [
+ CORRECT_MECHANISM_DRIVER
+ ]
+ }
+]
+
+ENV_CONFIGS_WITH_SPECIFIC_MECHANISM_DRIVER_RESPONSE = {
+ "environment_configs": ENV_CONFIGS_WITH_SPECIFIC_MECHANISM_DRIVER
+}
+
+ENV_CONFIGS_WITH_SPECIFIC_TYPE_DRIVER = [
+ {
+ "type_drivers": CORRECT_TYPE_DRIVER,
+ "name": "Mirantis-Liberty-API",
+ },
+ {
+ "type_drivers": CORRECT_TYPE_DRIVER,
+ "name": "Mirantis-Liberty"
+ }
+]
+
+ENV_CONFIGS_WITH_SPECIFIC_TYPE_DRIVER_RESPONSE = {
+ 'environment_configs': ENV_CONFIGS_WITH_SPECIFIC_TYPE_DRIVER
+}
+
+ENV_CONFIGS_WITH_SPECIFIC_USER = [
+ {
+ "user": USER,
+ "name": "Mirantis-Liberty-API",
+ },
+ {
+ "user": USER,
+ "name": "Mirantis-Liberty"
+ }
+]
+
+ENV_CONFIGS_WITH_SPECIFIC_USER_RESPONSE = {
+ "environment_configs": ENV_CONFIGS_WITH_SPECIFIC_USER
+}
+
+ENV_CONFIGS_WITH_SPECIFIC_LISTEN = [
+ {
+ "listen": BOOL_LISTEN,
+ "name": "Mirantis-Liberty-API",
+ },
+ {
+ "listen": BOOL_LISTEN,
+ "name": "Mirantis-Liberty"
+ }
+]
+
+ENV_CONFIGS_WITH_SPECIFIC_LISTEN_RESPONSE = {
+ "environment_configs": ENV_CONFIGS_WITH_SPECIFIC_LISTEN
+}
+
+ENV_CONFIGS_WITH_SPECIFIC_SCANNED = [
+ {
+ "scanned": BOOL_SCANNED,
+ "name": "Mirantis-Liberty-API",
+ },
+ {
+ "scanned": BOOL_SCANNED,
+ "name": "Mirantis-Liberty"
+ }
+]
+
+ENV_CONFIGS_WITH_SPECIFIC_SCANNED_RESPONSE = {
+ "environment_configs": ENV_CONFIGS_WITH_SPECIFIC_SCANNED
+}
+
+ENV_CONFIGS_WITH_SPECIFIC_MONITORING_SETUP_DONE = [
+ {
+ "monitoring_setup_done": BOOL_MONITORING_SETUP_DONE,
+ "name": "Mirantis-Liberty-API",
+ },
+ {
+ "monitoring_setup_done": BOOL_MONITORING_SETUP_DONE,
+ "name": "Mirantis-Liberty"
+ }
+]
+
+ENV_CONFIGS_WITH_SPECIFIC_MONITORING_SETUP_DONE_RESPONSE = {
+ "environment_configs": ENV_CONFIGS_WITH_SPECIFIC_MONITORING_SETUP_DONE
+}
+
+ENV_CONFIG = {
+ "app_path": "/home/korenlev/Calipso/app/",
+ "configuration": [
+ {
+ "host": "10.56.20.239",
+ "name": "mysql",
+ "password": "G1VKEbcqKZXoPthrtNma2D9Y",
+ "port": "3307",
+ "user": "root"
+ },
+ {
+ "name": "OpenStack",
+ "host": "10.56.20.239",
+ "admin_token": "wLWefGuD0uYJ7tqkeEScdnNo",
+ "port": "5000",
+ "user": "admin",
+ "pwd": "admin"
+ },
+ {
+ "host": "10.56.20.239",
+ "key": "/etc/calipso/keys/Mirantis-Liberty-id_rsa",
+ "name": "CLI",
+ "user": "root"
+ },
+ {
+ "host": "10.56.20.239",
+ "name": "AMQP",
+ "password": "YVWMiKMshZhlJCGqFu5PdT9d",
+ "port": "5673",
+ "user": "nova"
+ },
+ {
+ "config_folder": "/tmp/sensu_test",
+ "provision": "None",
+ "env_type": "development",
+ "name": "Monitoring",
+ "api_port": "4567",
+ "rabbitmq_port": "5671",
+ "rabbitmq_pass": "sensuaccess",
+ "rabbitmq_user": "sensu",
+ "ssh_port": "20022",
+ "ssh_user": "root",
+ "ssh_password": "calipso",
+ "server_ip": "korlev-calipso-staging1.cisco.com",
+ "server_name": "calipso-sensu",
+ "type": "Sensu"
+ }
+ ],
+ "distribution": "Mirantis-8.0",
+ "last_scanned": "2017-03-16T11:14:54Z",
+ "listen": True,
+ "mechanism_drivers": [
+ "ovs"
+ ],
+ "name": "Mirantis-Liberty",
+ "operational": "running",
+ "scanned": True,
+ "type": "environment",
+ "type_drivers": "vxlan",
+ "user": "WS7j8oTbWPf3LbNne"
+}
diff --git a/app/test/api/responders_test/test_data/inventory.py b/app/test/api/responders_test/test_data/inventory.py
new file mode 100644
index 0000000..47d611d
--- /dev/null
+++ b/app/test/api/responders_test/test_data/inventory.py
@@ -0,0 +1,37 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+URL = "/inventory"
+
+ID = "RegionOne-aggregates"
+NONEXISTENT_ID = "Unkown-Id"
+
+
+OBJECTS_LIST = [
+ {
+ "id": "Mirantis-Liberty-regions",
+ "name": "Regions",
+ "name_path": "/Mirantis-Liberty-API/Regions"
+ },
+ {
+ "id": "Mirantis-Liberty-projects",
+ "name": "Projects",
+ "name_path": "/Mirantis-Liberty-API/Projects"
+ }
+]
+
+OBJECT_IDS_RESPONSE = {
+ "objects": OBJECTS_LIST
+}
+
+
+OBJECTS = [{
+ "environment": "Mirantis-Liberty-API",
+ "id": "RegionOne-aggregates"
+}]
diff --git a/app/test/api/responders_test/test_data/links.py b/app/test/api/responders_test/test_data/links.py
new file mode 100644
index 0000000..e71c02d
--- /dev/null
+++ b/app/test/api/responders_test/test_data/links.py
@@ -0,0 +1,90 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.responders_test.test_data import base
+
+
+URL = "/links"
+
+UNKNOWN_HOST = "unknown host"
+
+WRONG_TYPE = base.WRONG_LINK_TYPE
+CORRECT_TYPE = base.CORRECT_LINK_TYPE
+
+WRONG_STATE = base.WRONG_LINK_STATE
+CORRECT_STATE = base.CORRECT_LINK_STATE
+
+LINK_ID = "58ca73ae3a8a836d10ff3b45"
+WRONG_LINK_ID = "58ca73ae3a8a836d10ff3b4"
+NONEXISTENT_LINK_ID = "58ca73ae3a8a836d10ff3b46"
+
+LINKS_WITH_SPECIFIC_TYPE = [
+ {
+ "id": "58ca73ae3a8a836d10ff3bb5",
+ "host": "node-1.cisco.com",
+ "link_type": CORRECT_TYPE,
+ "link_name": "Segment-103",
+ "environment": "Mirantis-Liberty-API"
+ },
+ {
+ "id": "58ca73ae3a8a836d10ff3b4d",
+ "host": "node-1.cisco.com",
+ "link_type": CORRECT_TYPE,
+ "link_name": "Segment-104",
+ "environment": "Mirantis-Liberty-API"
+ }
+]
+
+
+LINKS_WITH_SPECIFIC_STATE = [
+ {
+ "id": "58ca73ae3a8a836d10ff3bb5",
+ "host": "node-1.cisco.com",
+ "state": CORRECT_STATE,
+ "environment": "Mirantis-Liberty-API"
+ },
+ {
+ "id": "58ca73ae3a8a836d10ff3b4d",
+ "host": "node-1.cisco.com",
+ "state": CORRECT_STATE,
+ "environment": "Mirantis-Liberty-API"
+ }
+]
+
+LINKS_WITH_SPECIFIC_STATE_RESPONSE = {
+ "links": LINKS_WITH_SPECIFIC_STATE
+}
+
+LINKS_WITH_SPECIFIC_TYPE_RESPONSE = {
+ "links": LINKS_WITH_SPECIFIC_TYPE
+}
+
+LINKS_WITH_SPECIFIC_ID = [
+ {
+ "id": LINK_ID,
+ "host": "node-1.cisco.com",
+ "link_type": "pnic-network",
+ "link_name": "Segment-103",
+ "environment": "Mirantis-Liberty-API"
+ }
+]
+
+LINKS = [
+ {
+ "id": "58ca73ae3a8a836d10ff3b45",
+ "host": "node-1.cisco.com",
+ "link_type": "pnic-network",
+ "link_name": "Segment-103",
+ "environment": "Mirantis-Liberty-API"
+ }
+]
+
+LINKS_LIST_RESPONSE = {
+ "links": LINKS
+}
diff --git a/app/test/api/responders_test/test_data/messages.py b/app/test/api/responders_test/test_data/messages.py
new file mode 100644
index 0000000..b7b5abd
--- /dev/null
+++ b/app/test/api/responders_test/test_data/messages.py
@@ -0,0 +1,108 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.responders_test.test_data import base
+
+# url
+URL = "/messages"
+
+NONEXISTENT_MESSAGE_ID = "80b5e074-0f1a-4b67-810c-fa9c92d41a9f"
+MESSAGE_ID = "80b5e074-0f1a-4b67-810c-fa9c92d41a98"
+
+WRONG_SEVERITY = base.WRONG_MESSAGE_SEVERITY
+CORRECT_SEVERITY = base.CORRECT_MESSAGE_SEVERITY
+
+WRONG_RELATED_OBJECT_TYPE = base.WRONG_OBJECT_TYPE
+CORRECT_RELATED_OBJECT_TYPE = base.CORRECT_OBJECT_TYPE
+
+RELATED_OBJECT = "instance"
+NONEXISTENT_RELATED_OBJECT = "nonexistent-instance"
+
+WRONG_FORMAT_TIME = base.WRONG_FORMAT_TIME
+CORRECT_FORMAT_TIME = base.CORRECT_FORMAT_TIME
+
+MESSAGES_WITH_SPECIFIC_TIME = [
+ {
+ "level": "info",
+ "environment": "Mirantis-Liberty-API",
+ "id": "3c64fe31-ca3b-49a3-b5d3-c485d7a452e7",
+ "source_system": "OpenStack",
+ "timestamp": CORRECT_FORMAT_TIME
+ }
+]
+
+MESSAGES_WITH_SPECIFIC_TIME_RESPONSE = {
+ "messages": MESSAGES_WITH_SPECIFIC_TIME
+}
+
+MESSAGES_WITH_SPECIFIC_SEVERITY = [
+ {
+ "level": CORRECT_SEVERITY,
+ "environment": "Mirantis-Liberty-API",
+ "id": "3c64fe31-ca3b-49a3-b5d3-c485d7a452e7",
+ "source_system": "OpenStack"
+ },
+ {
+ "level": CORRECT_SEVERITY,
+ "environment": "Mirantis-Liberty-API",
+ "id": "c7071ec0-04db-4820-92ff-3ed2b916738f",
+ "source_system": "OpenStack"
+ },
+]
+
+MESSAGES_WITH_SPECIFIC_SEVERITY_RESPONSE = {
+ "messages": MESSAGES_WITH_SPECIFIC_SEVERITY
+}
+
+MESSAGES_WITH_SPECIFIC_RELATED_OBJECT_TYPE = [
+ {
+ "level": "info",
+ "environment": "Mirantis-Liberty-API",
+ "related_object_type": CORRECT_RELATED_OBJECT_TYPE,
+ "id": "3c64fe31-ca3b-49a3-b5d3-c485d7a452e7"
+ },
+ {
+ "level": "error",
+ "environment": "Mirantis-Liberty-API",
+ "related_object_type": CORRECT_RELATED_OBJECT_TYPE,
+ "id": "c7071ec0-04db-4820-92ff-3ed2b916738f"
+ },
+]
+
+MESSAGES_WITH_SPECIFIC_RELATED_OBJECT_TYPE_RESPONSE = {
+ "messages": MESSAGES_WITH_SPECIFIC_RELATED_OBJECT_TYPE
+}
+
+MESSAGES_WITH_SPECIFIC_ID = [
+ {
+ "level": "info",
+ "environment": "Mirantis-Liberty",
+ "id": MESSAGE_ID,
+ "source_system": "OpenStack"
+ }
+]
+
+MESSAGES = [
+ {
+ "level": "info",
+ "environment": "Mirantis-Liberty",
+ "id": "3c64fe31-ca3b-49a3-b5d3-c485d7a452e7",
+ "source_system": "OpenStack"
+ },
+ {
+ "level": "info",
+ "environment": "Mirantis-Liberty",
+ "id": "c7071ec0-04db-4820-92ff-3ed2b916738f",
+ "source_system": "OpenStack"
+ },
+]
+
+MESSAGES_RESPONSE = {
+ "messages": MESSAGES
+}
diff --git a/app/test/api/responders_test/test_data/monitoring_config_templates.py b/app/test/api/responders_test/test_data/monitoring_config_templates.py
new file mode 100644
index 0000000..0f387a4
--- /dev/null
+++ b/app/test/api/responders_test/test_data/monitoring_config_templates.py
@@ -0,0 +1,98 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.responders_test.test_data import base
+
+
+URL = "/monitoring_config_templates"
+
+WRONG_ID = base.WRONG_OBJECT_ID
+UNKNOWN_ID = "583711893e149c14785d6da5"
+CORRECT_ID = base.CORRECT_OBJECT_ID
+
+NON_INT_ORDER = 1.3
+INT_ORDER = 1
+
+WRONG_SIDE = base.WRONG_MONITORING_SIDE
+CORRECT_SIDE = base.CORRECT_MONITORING_SIDE
+
+TYPE = "client.json"
+
+TEMPLATES_WITH_SPECIFIC_ORDER = [
+ {
+ "order": INT_ORDER,
+ "id": "583711893e149c14785d6daa"
+ },
+ {
+ "order": INT_ORDER,
+ "id": "583711893e149c14785d6da7"
+ }
+]
+
+TEMPLATES_WITH_SPECIFIC_ORDER_RESPONSE = {
+ "monitoring_config_templates":
+ TEMPLATES_WITH_SPECIFIC_ORDER
+}
+
+TEMPLATES_WITH_SPECIFIC_SIDE = [
+ {
+ "side": CORRECT_SIDE,
+ "id": "583711893e149c14785d6daa"
+ },
+ {
+ "side": CORRECT_SIDE,
+ "id": "583711893e149c14785d6da7"
+ }
+]
+
+TEMPLATES_WITH_SPECIFIC_SIDE_RESPONSE = {
+ "monitoring_config_templates":
+ TEMPLATES_WITH_SPECIFIC_SIDE
+}
+
+TEMPLATES_WITH_SPECIFIC_TYPE = [
+ {
+ "type": TYPE,
+ "id": "583711893e149c14785d6daa"
+ },
+ {
+ "type": TYPE,
+ "id": "583711893e149c14785d6da7"
+ }
+]
+
+TEMPLATES_WITH_SPECIFIC_TYPE_RESPONSE = {
+ "monitoring_config_templates":
+ TEMPLATES_WITH_SPECIFIC_TYPE
+}
+
+TEMPLATES_WITH_SPECIFIC_ID = [
+ {
+ "type": "rabbitmq.json",
+ "side": "client",
+ "id": CORRECT_ID
+ }
+]
+
+TEMPLATES = [
+ {
+ "type": "rabbitmq.json",
+ "side": "client",
+ "id": "583711893e149c14785d6daa"
+ },
+ {
+ "type": "rabbitmq.json",
+ "side": "client",
+ "id": "583711893e149c14785d6da7"
+ }
+]
+
+TEMPLATES_RESPONSE = {
+ "monitoring_config_templates": TEMPLATES
+}
diff --git a/app/test/api/responders_test/test_data/scans.py b/app/test/api/responders_test/test_data/scans.py
new file mode 100644
index 0000000..479d371
--- /dev/null
+++ b/app/test/api/responders_test/test_data/scans.py
@@ -0,0 +1,187 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.responders_test.test_data import base
+
+URL = "/scans"
+
+WRONG_ID = base.WRONG_OBJECT_ID
+NONEXISTENT_ID = "58c96a075eb66a121cc4e750"
+CORRECT_ID = base.CORRECT_OBJECT_ID
+
+BASE_OBJECT = "node-2.cisco.com"
+
+WRONG_STATUS = base.WRONG_SCAN_STATUS
+CORRECT_STATUS = base.CORRECT_SCAN_STATUS
+
+SCANS = [
+ {
+ "status": "pending",
+ "environment": "Mirantis-Liberty-API",
+ "id": "58c96a075eb66a121cc4e75f",
+ },
+ {
+ "status": "completed",
+ "environment": "Mirantis-Liberty-API",
+ "id": "58c96a075eb66a121cc4e75e",
+ "scan_completed": True
+ }
+]
+
+SCANS_RESPONSE = {
+ "scans": SCANS
+}
+
+SCANS_WITH_SPECIFIC_ID = [
+ {
+ "status": "pending",
+ "environment": "Mirantis-Liberty-API",
+ "id": CORRECT_ID,
+ }
+]
+
+SCANS_WITH_SPECIFIC_BASE_OBJ = [
+ {
+ "status": "pending",
+ "environment": "Mirantis-Liberty-API",
+ "id": "58c96a075eb66a121cc4e75f",
+ "object_id": BASE_OBJECT
+ },
+ {
+ "status": "completed",
+ "environment": "Mirantis-Liberty-API",
+ "id": "58c96a075eb66a121cc4e75e",
+ "object_id": BASE_OBJECT,
+ "scan_completed": True
+ }
+]
+
+SCANS_WITH_SPECIFIC_BASE_OBJ_RESPONSE = {
+ "scans": SCANS_WITH_SPECIFIC_BASE_OBJ
+}
+
+SCANS_WITH_SPECIFIC_STATUS = [
+ {
+ "status": CORRECT_STATUS,
+ "environment": "Mirantis-Liberty-API",
+ "id": "58c96a075eb66a121cc4e75f",
+ "scan_completed": True
+ },
+ {
+ "status": CORRECT_STATUS,
+ "environment": "Mirantis-Liberty-API",
+ "id": "58c96a075eb66a121cc4e75e",
+ "scan_completed": True
+ }
+]
+
+SCANS_WITH_SPECIFIC_STATUS_RESPONSE = {
+ "scans": SCANS_WITH_SPECIFIC_STATUS
+}
+
+NON_DICT_SCAN = base.NON_DICT_OBJ
+
+SCAN = {
+ "status": "pending",
+ "log_level": "warning",
+ "clear": True,
+ "scan_only_inventory": True,
+ "environment": "Mirantis-Liberty-API",
+ "inventory": "inventory",
+ "object_id": "ff"
+}
+
+SCAN_WITHOUT_ENV = {
+ "status": "pending",
+ "log_level": "warning",
+ "clear": True,
+ "scan_only_inventory": True,
+ "inventory": "inventory",
+ "object_id": "ff"
+}
+
+SCAN_WITH_UNKNOWN_ENV = {
+ "status": "pending",
+ "log_level": "warning",
+ "clear": True,
+ "scan_only_inventory": True,
+ "environment": base.UNKNOWN_ENV,
+ "inventory": "inventory",
+ "object_id": "ff"
+}
+
+SCAN_WITHOUT_STATUS = {
+ "log_level": "warning",
+ "clear": True,
+ "scan_only_inventory": True,
+ "environment": "Mirantis-Liberty-API",
+ "inventory": "inventory",
+ "object_id": "ff"
+}
+
+SCAN_WITH_WRONG_STATUS = {
+ "status": WRONG_STATUS,
+ "log_level": "warning",
+ "clear": True,
+ "scan_only_inventory": True,
+ "environment": "Mirantis-Liberty-API",
+ "inventory": "inventory",
+ "object_id": "ff"
+}
+
+SCAN_WITH_WRONG_LOG_LEVEL = {
+ "status": "pending",
+ "log_level": base.WRONG_LOG_LEVEL,
+ "clear": True,
+ "scan_only_inventory": True,
+ "environment": "Mirantis-Liberty-API",
+ "inventory": "inventory",
+ "object_id": "ff"
+}
+
+SCAN_WITH_NON_BOOL_CLEAR = {
+ "status": "pending",
+ "log_level": "warning",
+ "clear": base.NON_BOOL,
+ "scan_only_inventory": True,
+ "environment": "Mirantis-Liberty-API",
+ "inventory": "inventory",
+ "object_id": "ff"
+}
+
+
+SCAN_WITH_NON_BOOL_SCAN_ONLY_INVENTORY = {
+ "status": "pending",
+ "log_level": "warning",
+ "clear": True,
+ "scan_only_inventory": base.NON_BOOL,
+ "environment": "Mirantis-Liberty-API",
+ "inventory": "inventory",
+ "object_id": "ff"
+}
+
+SCAN_WITH_NON_BOOL_SCAN_ONLY_LINKS = {
+ "status": "pending",
+ "log_level": "warning",
+ "clear": True,
+ "scan_only_links": base.NON_BOOL,
+ "environment": "Mirantis-Liberty-API",
+ "inventory": "inventory",
+ "object_id": "ff"
+}
+
+SCAN_WITH_NON_BOOL_SCAN_ONLY_CLIQUES = {
+ "status": "pending",
+ "log_level": "warning",
+ "clear": True,
+ "scan_only_cliques": base.NON_BOOL,
+ "environment": "Mirantis-Liberty-API",
+ "inventory": "inventory",
+ "object_id": "ff"
+}
diff --git a/app/test/api/responders_test/test_data/scheduled_scans.py b/app/test/api/responders_test/test_data/scheduled_scans.py
new file mode 100644
index 0000000..1019572
--- /dev/null
+++ b/app/test/api/responders_test/test_data/scheduled_scans.py
@@ -0,0 +1,138 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.api.responders_test.test_data import base
+
+URL = "/scheduled_scans"
+WRONG_FREQ = "wrong_freq"
+CORRECT_FREQ = "WEEKLY"
+WRONG_ID = base.WRONG_OBJECT_ID
+NONEXISTENT_ID = "58c96a075eb66a121cc4e750"
+CORRECT_ID = "ff4d3e80e42e886bef13a084"
+NON_DICT_SCHEDULED_SCAN = ""
+
+
+SCHEDULED_SCANS = [
+ {
+ "id": "ff4d3e80e42e886bef13a084",
+ "environment": base.ENV_NAME,
+ "scheduled_timestamp": "2017-07-24T12:45:03.784+0000",
+ "freq": "WEEKLY"
+ },
+ {
+ "id": "58e4e1aa6df71e971324ea62",
+ "environment": base.ENV_NAME,
+ "scheduled_timestamp": "2017-07-24T12:45:03.784+0000",
+ "freq": "WEEKLY"
+ }
+]
+
+SCHEDULED_SCANS_RESPONSE = {
+ "scheduled_scans": SCHEDULED_SCANS
+}
+
+SCHEDULED_SCAN_WITH_SPECIFIC_FREQ = [{
+ "id": "ff4d3e80e42e886bef13a084",
+ "environment": base.ENV_NAME,
+ "scheduled_timestamp": "2017-07-24T12:45:03.784+0000",
+ "freq": CORRECT_FREQ
+}]
+
+SCHEDULED_SCAN_WITH_SPECIFIC_FREQ_RESPONSE = {
+ "scheduled_scans": SCHEDULED_SCAN_WITH_SPECIFIC_FREQ
+}
+
+SCHEDULED_SCAN_WITH_SPECIFIC_ID = [{
+ "id": CORRECT_ID,
+ "environment": base.ENV_NAME,
+ "scheduled_timestamp": "2017-07-24T12:45:03.784+0000",
+ "freq": CORRECT_FREQ
+}]
+
+SCHEDULED_SCAN = {
+ "environment": base.ENV_NAME,
+ "freq": CORRECT_FREQ,
+ "submit_timestamp": "2017-07-24T12:45:03.784+0000"
+}
+
+SCHEDULED_SCAN_WITHOUT_ENV = {
+ "freq": CORRECT_FREQ,
+ "submit_timestamp": "2017-07-24T12:45:03.784+0000"
+}
+
+SCHEDULED_SCAN_WITH_UNKNOWN_ENV = {
+ "environment": base.UNKNOWN_ENV,
+ "freq": CORRECT_FREQ,
+ "submit_timestamp": "2017-07-24T12:45:03.784+0000"
+}
+
+SCHEDULED_SCAN_WITHOUT_FREQ = {
+ "environment": base.ENV_NAME,
+ "submit_timestamp": "2017-07-24T12:45:03.784+0000"
+}
+
+SCHEDULED_SCAN_WITHOUT_SUBMIT_TIMESTAMP = {
+ "environment": base.ENV_NAME,
+ "freq": CORRECT_FREQ,
+}
+
+SCHEDULED_SCAN_WITH_WRONG_FREQ = {
+ "environment": base.ENV_NAME,
+ "freq": WRONG_FREQ,
+ "submit_timestamp": "2017-07-24T12:45:03.784+0000"
+}
+
+SCHEDULED_SCAN_WITH_WRONG_LOG_LEVEL = {
+ "environment": base.ENV_NAME,
+ "freq": CORRECT_FREQ,
+ "log_level": base.WRONG_LOG_LEVEL,
+ "submit_timestamp": "2017-07-24T12:45:03.784+0000"
+}
+
+SCHEDULED_SCAN_WITH_WRONG_SUBMIT_TIMESTAMP = {
+ "environment": base.ENV_NAME,
+ "freq": CORRECT_FREQ,
+ "submit_timestamp": base.WRONG_FORMAT_TIME
+}
+
+SCHEDULED_SCAN_WITH_NON_BOOL_CLEAR = {
+ "environment": base.ENV_NAME,
+ "freq": CORRECT_FREQ,
+ "submit_timestamp": "2017-07-24T12:45:03.784+0000",
+ "clear": base.NON_BOOL
+}
+
+SCHEDULED_SCAN_WITH_NON_BOOL_SCAN_ONLY_LINKS = {
+ "environment": base.ENV_NAME,
+ "freq": CORRECT_FREQ,
+ "submit_timestamp": "2017-07-24T12:45:03.784+0000",
+ "scan_only_links": base.NON_BOOL
+}
+
+SCHEDULED_SCAN_WITH_NON_BOOL_SCAN_ONLY_CLIQUES = {
+ "environment": base.ENV_NAME,
+ "freq": CORRECT_FREQ,
+ "submit_timestamp": "2017-07-24T12:45:03.784+0000",
+ "scan_only_cliques": base.NON_BOOL
+}
+
+SCHEDULED_SCAN_WITH_NON_BOOL_SCAN_ONLY_INVENTORY = {
+ "environment": base.ENV_NAME,
+ "freq": CORRECT_FREQ,
+ "submit_timestamp": "2017-07-24T12:45:03.784+0000",
+ "scan_only_inventory": base.NON_BOOL
+}
+
+SCHEDULED_SCAN_WITH_EXTRA_SCAN_ONLY_FLAGS = {
+ "environment": base.ENV_NAME,
+ "freq": CORRECT_FREQ,
+ "submit_timestamp": "2017-07-24T12:45:03.784+0000",
+ "scan_only_links": True,
+ "scan_only_inventory": True
+}
diff --git a/app/test/api/responders_test/test_data/tokens.py b/app/test/api/responders_test/test_data/tokens.py
new file mode 100644
index 0000000..8d9960d
--- /dev/null
+++ b/app/test/api/responders_test/test_data/tokens.py
@@ -0,0 +1,83 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+URL = '/auth/tokens'
+
+AUTH_OBJ_WITHOUT_AUTH = {
+
+}
+
+AUTH_OBJ_WITHOUT_METHODS = {
+ 'auth': {}
+}
+
+AUTH_OBJ_WITHOUT_CREDENTIALS = {
+ 'auth': {
+ 'methods': ['credentials']
+ }
+}
+
+AUTH_OBJ_WITHOUT_TOKEN = {
+ 'auth': {
+ 'methods': ['token']
+ }
+}
+
+AUTH_OBJ_WITH_WRONG_CREDENTIALS = {
+ 'auth': {
+ 'methods': ['credentials'],
+ 'credentials': {
+ 'username': 'wrong_user',
+ 'password': 'password'
+ }
+ }
+}
+
+AUTH_OBJ_WITH_WRONG_TOKEN = {
+ 'auth': {
+ 'methods': ['token'],
+ 'token': 'wrong_token'
+ }
+}
+
+AUTH_OBJ_WITH_CORRECT_CREDENTIALS = {
+ 'auth': {
+ 'methods': ['credentials'],
+ 'credentials': {
+ 'username': 'correct_user',
+ 'password': 'password'
+ }
+ }
+}
+
+AUTH_OBJ_WITH_CORRECT_TOKEN = {
+ 'auth': {
+ 'methods': ['token'],
+ 'token': '17dfa88789aa47f6bb8501865d905f13'
+ }
+}
+
+HEADER_WITHOUT_TOKEN = {
+
+}
+
+HEADER_WITH_WRONG_TOKEN = {
+ 'X-Auth-Token': 'wrong token'
+}
+
+HEADER_WITH_CORRECT_TOKEN = {
+ 'X-Auth-Token': '17dfa88789aa47f6bb8501865d905f13'
+}
+
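+# dotted paths to the Auth backend methods, intended as patch targets
+# (presumably used with unittest.mock.patch in the token tests)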
+AUTH_BASE_PATH = 'api.auth.auth.Auth'
+AUTH_GET_TOKEN = AUTH_BASE_PATH + '.get_token'
+AUTH_WRITE_TOKEN = AUTH_BASE_PATH + '.write_token'
+AUTH_DELETE_TOKEN = AUTH_BASE_PATH + '.delete_token'
+AUTH_VALIDATE_CREDENTIALS = AUTH_BASE_PATH + '.validate_credentials'
+AUTH_VALIDATE_TOKEN = AUTH_BASE_PATH + '.validate_token'
diff --git a/app/test/api/test_base.py b/app/test/api/test_base.py
new file mode 100644
index 0000000..c126b2b
--- /dev/null
+++ b/app/test/api/test_base.py
@@ -0,0 +1,101 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import copy
+
+
+from api.app import App
+from api.middleware.authentication import AuthenticationMiddleware
+from api.responders.responder_base import ResponderBase
+from api.backends.ldap_access import LDAPAccess
+from falcon.testing import TestCase
+from test.api.responders_test.test_data import base
+from unittest.mock import MagicMock
+from utils.mongo_access import MongoAccess
+
+
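+# stand-in for AuthenticationMiddleware.process_request: lets test requests
+# bypass authentication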
+def mock_auth_method(*args):
+ return None
+
+
+class TestBase(TestCase):
+
+ def setUp(self, authenticate=False):
+ super().setUp()
+ # bypass the authentication middleware unless authenticate=True
+ self.authenticate = authenticate
+ if not authenticate:
+ self.original_auth_method = AuthenticationMiddleware.process_request
+ AuthenticationMiddleware.process_request = mock_auth_method
+
+ ResponderBase.get_constants_by_name = MagicMock(side_effect=
+ lambda name: base.CONSTANTS_BY_NAMES[name])
+ # mock mongo access
+ MongoAccess.mongo_connect = MagicMock()
+ MongoAccess.db = MagicMock()
+ MongoAccess.client = MagicMock()
+ # mock ldap access
+ LDAPAccess.get_ldap_params = MagicMock()
+ LDAPAccess.connect_ldap_server = MagicMock()
+
+ log_level = 'debug'
+ self.app = App(log_level=log_level).get_app()
+
+ def validate_get_request(self, url, params={}, headers=None, mocks={},
+ side_effects={},
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=None):
+ self.validate_request("GET", url, params, headers, "",
+ mocks, side_effects,
+ expected_code,
+ expected_response)
+
+ def validate_request(self, action, url, params, headers, body,
+ mocks, side_effects, expected_code,
+ expected_response):
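+ # 'mocks' maps a mocked method to a fixed return value;
+ # 'side_effects' maps a mocked method to a side-effect callable or iterable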
+ for mock_method, mock_data in mocks.items():
+ mock_method.return_value = mock_data
+
+ for mock_method, side_effect in side_effects.items():
+ mock_method.side_effect = side_effect
+
+ result = self.simulate_request(action, url, params=params, headers=headers, body=body)
+ self.assertEqual(result.status, expected_code)
+ if expected_response:
+ self.assertEqual(result.json, expected_response)
+
+ def validate_post_request(self, url, headers={}, body="", mocks={},
+ side_effects={},
+ expected_code=base.CREATED_CODE, expected_response=None):
+ self.validate_request("POST", url, {}, headers, body, mocks, side_effects,
+ expected_code, expected_response)
+
+ def validate_delete_request(self, url, params={}, headers={}, mocks={},
+ side_effects={},
+ expected_code=base.SUCCESSFUL_CODE, expected_response=None):
+ self.validate_request("DELETE", url, params, headers, "",
+ mocks, side_effects,
+ expected_code,
+ expected_response)
+
+ def get_updated_data(self, original_data, deleted_keys=[], updates={}):
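+ # return a deep copy of original_data with the given keys removed
+ # and/or overridden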
+ copy_data = copy.deepcopy(original_data)
+
+ for key in deleted_keys:
+ del copy_data[key]
+
+ for key, value in updates.items():
+ copy_data[key] = value
+
+ return copy_data
+
+ def tearDown(self):
+ # if the authentication method has been mocked, restore it after the test
+ if not self.authenticate:
+ AuthenticationMiddleware.process_request = self.original_auth_method
diff --git a/app/test/event_based_scan/__init__.py b/app/test/event_based_scan/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/test/event_based_scan/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/test/event_based_scan/config/__init__.py b/app/test/event_based_scan/config/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/test/event_based_scan/config/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/test/event_based_scan/config/test_config.py b/app/test/event_based_scan/config/test_config.py
new file mode 100644
index 0000000..176fd48
--- /dev/null
+++ b/app/test/event_based_scan/config/test_config.py
@@ -0,0 +1,17 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# local config info for test.
+
+
+MONGODB_CONFIG = 'your-mongo-config-path-here'
+
+ENV_CONFIG = 'your-env-name-here'
+
+COLLECTION_CONFIG = 'your-inventory-collection-name-here'
diff --git a/app/test/event_based_scan/test_data/__init__.py b/app/test/event_based_scan/test_data/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/test/event_based_scan/test_data/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/test/event_based_scan/test_data/event_payload_instance_add.py b/app/test/event_based_scan/test_data/event_payload_instance_add.py
new file mode 100644
index 0000000..316444a
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_instance_add.py
@@ -0,0 +1,122 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.event_based_scan.config.test_config import ENV_CONFIG
+
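+# sample Nova 'compute.instance.update' notification payload representing
+# an instance-add event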
+EVENT_PAYLOAD_INSTANCE_ADD = {
+ 'publisher_id': 'compute.node-251.cisco.com', '_context_resource_uuid': None,
+ '_context_instance_lock_checked': False,
+ '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_request_id': 'req-432fccc8-4d13-4e62-8639-c99acee82cb3',
+ '_context_show_deleted': False,
+ '_context_timestamp': '2016-09-08T22:01:41.724236',
+ '_unique_id': '537fc5b27c244479a69819a4a435723b',
+ '_context_roles': ['_member_', 'admin'], '_context_read_only': False,
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9',
+ '_context_project_name': 'calipso-project',
+ '_context_project_domain': None, 'event_type': 'compute.instance.update',
+ '_context_service_catalog': [{'endpoints': [
+ {'internalURL': 'http://192.168.0.2:8776/v2/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'publicURL': 'http://172.16.0.3:8776/v2/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'adminURL': 'http://192.168.0.2:8776/v2/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'region': 'RegionOne'}],
+ 'type': 'volumev2',
+ 'name': 'cinderv2'},
+ {'endpoints': [{
+ 'internalURL': 'http://192.168.0.2:8776/v1/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'publicURL': 'http://172.16.0.3:8776/v1/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'adminURL': 'http://192.168.0.2:8776/v1/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'region': 'RegionOne'}],
+ 'type': 'volume',
+ 'name': 'cinder'}],
+ 'payload': {'instance_type': 'm1.micro', 'progress': '', 'display_name': 'test8',
+ 'kernel_id': '',
+ 'new_task_state': None, 'old_display_name': 'name-change',
+ 'state_description': '',
+ 'old_state': 'building', 'ramdisk_id': '',
+ 'created_at': '2016-09-08 16:32:46+00:00',
+ 'os_type': None,
+ 'ephemeral_gb': 0, 'launched_at': '2016-09-08T16:25:08.000000',
+ 'instance_flavor_id': 'f068e24b-5d7e-4819-b5ca-89a33834a918',
+ 'image_meta': {'min_ram': '64', 'container_format': 'bare', 'min_disk': '0',
+ 'disk_format': 'qcow2',
+ 'base_image_ref': 'c6f490c4-3656-43c6-8d03-b4e66bd249f9'},
+ 'audit_period_beginning': '2016-09-01T00:00:00.000000', 'memory_mb': 64,
+ 'cell_name': '',
+ 'access_ip_v6': None, 'instance_type_id': 6, 'reservation_id': 'r-bycutzve',
+ 'access_ip_v4': None,
+ 'hostname': 'chengli-test-vm1', 'metadata': {},
+ 'user_id': '13baa553aae44adca6615e711fd2f6d9',
+ 'availability_zone': 'calipso-zone',
+ 'instance_id': '27a87908-bc1b-45cc-9238-09ad1ae686a7', 'deleted_at': '',
+ 'image_ref_url': 'http://172.16.0.4:9292/images/c6f490c4-3656-43c6-8d03-b4e66bd249f9',
+ 'host': 'node-252.cisco.com', 'vcpus': 1, 'state': 'active',
+ 'old_task_state': None,
+ 'architecture': None,
+ 'terminated_at': '', 'root_gb': 0,
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'node': 'node-252.cisco.com', 'bandwidth': {}, 'disk_gb': 0,
+ 'audit_period_ending': '2016-09-08T22:01:43.165282'},
+ '_context_quota_class': None,
+ '_context_is_admin': True, '_context_read_deleted': 'no',
+ 'timestamp': '2016-09-08 22:01:43.189907',
+ 'message_id': '4a9068c6-dcd1-4d6c-81d7-db866e07c1ff', 'priority': 'INFO',
+ '_context_domain': None,
+ '_context_user': '13baa553aae44adca6615e711fd2f6d9',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_remote_address': '192.168.0.2', '_context_user_domain': None,
+ '_context_auth_token': '''gAAAAABX0d-R0Q4zIrznmZ_L8BT0m4r_lp-7eOr4IenbKz511g2maNo8qhIb86HtA7S
+ VGsEJvy4KRcNIGlVRdmGyXBYm3kEuakQXTsXLxvyQeTtgZ9UgnLLXhQvMLbA2gwaimVpyRljq92R7Y7CwnNFLjibhOiYs
+ NlvBqitJkaRaQa4sg4xCN2tBj32Re-jRu6dR_sIA-haT''',
+ '_context_user_name': 'admin'}
+
+INSTANCES_ROOT = {
+ "create_object": True,
+ "environment": ENV_CONFIG,
+ "id": "node-252.cisco.com-instances",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-regions/RegionOne/RegionOne-availability_zones" +
+ "/calipso-zone/node-252.cisco.com/node-252.cisco.com-instances",
+ "name": "Instances",
+ "name_path": "/" + ENV_CONFIG + "/Regions/RegionOne/Availability Zones/calipso-zone/node-252.cisco.com/Instances",
+ "object_name": "Instances",
+ "parent_id": "node-252.cisco.com",
+ "parent_type": "host",
+ "show_in_tree": True,
+ "text": "Instances",
+ "type": "instances_folder"
+}
+
+INSTANCE_DOCUMENT = {
+ 'projects': ['calipso-project'],
+ 'network': [],
+ 'host': 'node-252.cisco.com', 'parent_type': 'instances_folder',
+ '_id': '57e421194a0a8a3fbe3bd2d0', 'mac_address': 'fa:16:3e:5e:9e:db', 'type': 'instance',
+ 'name': 'name-change',
+ 'uuid': '27a87908-bc1b-45cc-9238-09ad1ae686a7', 'environment': ENV_CONFIG,
+ 'ip_address': '192.168.0.4', 'local_name': 'instance-00000020', 'object_name': 'test8',
+ 'parent_id': 'node-223.cisco.com-instances', 'project_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'name_path': '/'+ENV_CONFIG+'/Regions/RegionOne/Availability Zones' +
+ '/calipso-zone/node-252.cisco.com/Instances/name-change',
+ 'id': '27a87908-bc1b-45cc-9238-09ad1ae686a7',
+ 'id_path': '/'+ENV_CONFIG+'/'+ENV_CONFIG+'-regions/RegionOne/RegionOne-availability_zones/calipso-zone' +
+ '/node-223.cisco.com/node-223.cisco.com-instances/27a87908-bc1b-45cc-9238-09ad1ae686a7',
+ 'show_in_tree': True}
+
+HOST = {
+ 'name_path': '/'+ ENV_CONFIG +'/Regions/RegionOne/Availability Zones/calipso-zone/node-252.cisco.com',
+ 'id_path': '/'+ENV_CONFIG+ '/'+ENV_CONFIG+'-regions/RegionOne/' +
+ 'RegionOne-availability_zones/calipso-zone/node-252.cisco.com',
+ 'object_name': 'node-252.cisco.com', 'last_scanned': 0,
+ 'type': 'host', 'environment': ENV_CONFIG, 'host': 'node-252.cisco.com', 'id': 'node-252.cisco.com',
+ 'ip_address': '192.168.0.4', 'name': 'node-252.cisco.com', 'host_type': ['Compute'],
+ 'services': {'nova-compute': {'updated_at': '2016-09-26T22:47:09.000000', 'active': True, 'available': True}},
+ 'show_in_tree': True, 'zone': 'calipso-zone', 'os_id': '1',
+ 'parent_type': 'availability_zone', 'parent_id': 'calipso-zone'
+}
diff --git a/app/test/event_based_scan/test_data/event_payload_instance_delete.py b/app/test/event_based_scan/test_data/event_payload_instance_delete.py
new file mode 100644
index 0000000..a94de63
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_instance_delete.py
@@ -0,0 +1,97 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.event_based_scan.config.test_config import ENV_CONFIG
+
+
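+# sample Nova notification payload representing an instance-delete event
+# (payload 'state' is 'deleted')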
+EVENT_PAYLOAD_INSTANCE_DELETE = {
+ 'publisher_id': 'compute.node-253.cisco.com', '_context_resource_uuid': None,
+ '_context_instance_lock_checked': False,
+ '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_request_id': 'req-432fccc8-4d13-4e62-8639-c99acee82cb3',
+ '_context_show_deleted': False,
+ '_context_timestamp': '2016-09-08T22:01:41.724236',
+ '_unique_id': '537fc5b27c244479a69819a4a435723b',
+ '_context_roles': ['_member_', 'admin'], '_context_read_only': False,
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9',
+ '_context_project_name': 'calipso-project',
+ '_context_project_domain': None, 'event_type': 'compute.instance.update',
+ '_context_service_catalog': [{'endpoints': [
+ {'internalURL': 'http://192.168.0.2:8776/v2/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'publicURL': 'http://172.16.0.3:8776/v2/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'adminURL': 'http://192.168.0.2:8776/v2/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'region': 'RegionOne'}],
+ 'type': 'volumev2',
+ 'name': 'cinderv2'},
+ {'endpoints': [{
+ 'internalURL': 'http://192.168.0.2:8776/v1/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'publicURL': 'http://172.16.0.3:8776/v1/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'adminURL': 'http://192.168.0.2:8776/v1/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'region': 'RegionOne'}],
+ 'type': 'volume',
+ 'name': 'cinder'}],
+ 'payload': {'instance_type': 'm1.micro', 'progress': '', 'display_name': 'test8',
+ 'kernel_id': '',
+ 'new_task_state': None, 'old_display_name': 'name-change',
+ 'state_description': '',
+ 'old_state': 'active', 'ramdisk_id': '',
+ 'created_at': '2016-09-08 16:32:46+00:00',
+ 'os_type': None,
+ 'ephemeral_gb': 0, 'launched_at': '2016-09-08T16:25:08.000000',
+ 'instance_flavor_id': 'f068e24b-5d7e-4819-b5ca-89a33834a918',
+ 'image_meta': {'min_ram': '64', 'container_format': 'bare',
+ 'min_disk': '0',
+ 'disk_format': 'qcow2',
+ 'base_image_ref': 'c6f490c4-3656-43c6-8d03-b4e66bd249f9'},
+ 'audit_period_beginning': '2016-09-01T00:00:00.000000', 'memory_mb': 64,
+ 'cell_name': '',
+ 'access_ip_v6': None, 'instance_type_id': 6,
+ 'reservation_id': 'r-bycutzve',
+ 'access_ip_v4': None,
+ 'hostname': 'chengli-test-vm1', 'metadata': {},
+ 'user_id': '13baa553aae44adca6615e711fd2f6d9',
+ 'availability_zone': 'calipso-zone',
+ 'instance_id': '27a87908-bc1b-45cc-9238-09ad1ae686a7', 'deleted_at': '',
+ 'image_ref_url': 'http://172.16.0.4:9292/images/c6f490c4-3656-43c6-8d03-b4e66bd249f9',
+ 'host': 'node-252.cisco.com', 'vcpus': 1, 'state': 'deleted',
+ 'old_task_state': None,
+ 'architecture': None,
+ 'terminated_at': '', 'root_gb': 0,
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'node': 'node-252.cisco.com', 'bandwidth': {}, 'disk_gb': 0,
+ 'audit_period_ending': '2016-09-08T22:01:43.165282'},
+ '_context_quota_class': None,
+ '_context_is_admin': True, '_context_read_deleted': 'no',
+ 'timestamp': '2016-09-08 22:01:43.189907',
+ 'message_id': '4a9068c6-dcd1-4d6c-81d7-db866e07c1ff', 'priority': 'INFO',
+ '_context_domain': None,
+ '_context_user': '13baa553aae44adca6615e711fd2f6d9',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_remote_address': '192.168.0.2', '_context_user_domain': None,
+ '_context_auth_token': '''gAAAAABX0d-R0Q4zIrznmZ_L8BT0m4r_lp-7eOr4IenbKz511g2maNo8qhIb86HtA7S
+ VGsEJvy4KRcNIGlVRdmGyXBYm3kEuakQXTsXLxvyQeTtgZ9UgnLLXhQvMLbA2gwaimVpyRljq92R7Y7CwnNFLjibhOiYs
+ NlvBqitJkaRaQa4sg4xCN2tBj32Re-jRu6dR_sIA-haT''',
+ '_context_user_name': 'admin'}
+
+
+INSTANCE_DOCUMENT = {
+ 'projects': ['calipso-project'],
+ 'network': ['b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe', '7e59b726-d6f4-451a-a574-c67a920ff627'],
+ 'host': 'node-252.cisco.com', 'parent_type': 'instances_folder',
+ '_id': '57e421194a0a8a3fbe3bd2d0', 'mac_address': 'fa:16:3e:5e:9e:db', 'type': 'instance',
+ 'name': 'test8',
+ 'uuid': '27a87908-bc1b-45cc-9238-09ad1ae686a7', 'environment': ENV_CONFIG,
+ 'ip_address': '192.168.0.4', 'local_name': 'instance-00000020', 'object_name': 'test8',
+ 'parent_id': 'node-252.cisco.com-instances', 'project_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'name_path': '/' + ENV_CONFIG + '/Regions/RegionOne/Availability Zones/calipso-zone/node-252.cisco.com/Instances/test8',
+ 'id': '27a87908-bc1b-45cc-9238-09ad1ae686a7',
+ 'id_path': '/' + ENV_CONFIG + '/' + ENV_CONFIG + '-regions/RegionOne/RegionOne-availability_zones/calipso-zone/'+
+ 'node-252.cisco.com/node-252.cisco.com-instances/27a87908-bc1b-45cc-9238-09ad1ae686a7',
+ 'show_in_tree': True}
diff --git a/app/test/event_based_scan/test_data/event_payload_instance_update.py b/app/test/event_based_scan/test_data/event_payload_instance_update.py
new file mode 100644
index 0000000..8b4f1af
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_instance_update.py
@@ -0,0 +1,99 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.event_based_scan.config.test_config import ENV_CONFIG
+
+
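+# sample Nova notification payload representing an instance-update event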
+EVENT_PAYLOAD_INSTANCE_UPDATE = {
+ 'publisher_id': 'compute.node-222.cisco.com', '_context_resource_uuid': None,
+ '_context_instance_lock_checked': False,
+ '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_request_id': 'req-432fccc8-4d13-4e62-8639-c99acee82cb3',
+ '_context_show_deleted': False,
+ '_context_timestamp': '2016-09-08T22:01:41.724236',
+ '_unique_id': '537fc5b27c244479a69819a4a435723b',
+ '_context_roles': ['_member_', 'admin'], '_context_read_only': False,
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9',
+ '_context_project_name': 'calipso-project',
+ '_context_project_domain': None, 'event_type': 'compute.instance.update',
+ '_context_service_catalog': [
+ {'endpoints': [
+ {'internalURL': 'http://192.168.0.2:8776/v2/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'publicURL': 'http://172.16.0.3:8776/v2/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'adminURL': 'http://192.168.0.2:8776/v2/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'region': 'RegionOne'}],
+ 'type': 'volumev2',
+ 'name': 'cinderv2'},
+ {'endpoints': [{
+ 'internalURL': 'http://192.168.0.2:8776/v1/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'publicURL': 'http://172.16.0.3:8776/v1/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'adminURL': 'http://192.168.0.2:8776/v1/75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'region': 'RegionOne'}],
+ 'type': 'volume',
+ 'name': 'cinder'}],
+ 'payload': {'instance_type': 'm1.micro', 'progress': '', 'display_name': 'test8',
+ 'kernel_id': '',
+ 'new_task_state': None, 'old_display_name': 'name-change',
+ 'state_description': '',
+ 'old_state': 'active', 'ramdisk_id': '',
+ 'created_at': '2016-09-08 16:32:46+00:00',
+ 'os_type': None,
+ 'ephemeral_gb': 0, 'launched_at': '2016-09-08T16:25:08.000000',
+ 'instance_flavor_id': 'f068e24b-5d7e-4819-b5ca-89a33834a918',
+ 'image_meta': {'min_ram': '64', 'container_format': 'bare',
+ 'min_disk': '0',
+ 'disk_format': 'qcow2',
+ 'base_image_ref': 'c6f490c4-3656-43c6-8d03-b4e66bd249f9'},
+ 'audit_period_beginning': '2016-09-01T00:00:00.000000', 'memory_mb': 64,
+ 'cell_name': '',
+ 'access_ip_v6': None, 'instance_type_id': 6,
+ 'reservation_id': 'r-bycutzve',
+ 'access_ip_v4': None,
+ 'hostname': 'chengli-test-vm1', 'metadata': {},
+ 'user_id': '13baa553aae44adca6615e711fd2f6d9',
+ 'availability_zone': 'calipso-zone',
+ 'instance_id': '27a87908-bc1b-45cc-9238-09ad1ae686a7', 'deleted_at': '',
+ 'image_ref_url': 'http://172.16.0.4:9292/images/c6f490c4-3656-43c6-8d03-b4e66bd249f9',
+ 'host': 'node-223.cisco.com', 'vcpus': 1, 'state': 'active',
+ 'old_task_state': None,
+ 'architecture': None,
+ 'terminated_at': '', 'root_gb': 0,
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'node': 'node-223.cisco.com', 'bandwidth': {}, 'disk_gb': 0,
+ 'audit_period_ending': '2016-09-08T22:01:43.165282'},
+ '_context_quota_class': None,
+ '_context_is_admin': True, '_context_read_deleted': 'no',
+ 'timestamp': '2016-09-08 22:01:43.189907',
+ 'message_id': '4a9068c6-dcd1-4d6c-81d7-db866e07c1ff', 'priority': 'INFO',
+ '_context_domain': None,
+ '_context_user': '13baa553aae44adca6615e711fd2f6d9',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_remote_address': '192.168.0.2', '_context_user_domain': None,
+ '_context_auth_token': '''gAAAAABX0d-R0Q4zIrznmZ_L8BT0m4r_lp-7eOr4IenbKz511g2maNo8qhIb86HtA7S
+ VGsEJvy4KRcNIGlVRdmGyXBYm3kEuakQXTsXLxvyQeTtgZ9UgnLLXhQvMLbA2gwaimVpyRljq92R7Y7CwnNFLjibhOiYs
+ NlvBqitJkaRaQa4sg4xCN2tBj32Re-jRu6dR_sIA-haT''',
+ '_context_user_name': 'admin'}
+
+
+INSTANCE_DOCUMENT = {
+ 'projects': ['calipso-project'],
+ 'network': ['b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe', '7e59b726-d6f4-451a-a574-c67a920ff627'],
+ 'host': 'node-223.cisco.com', 'parent_type': 'instances_folder',
+ '_id': '57e421194a0a8a3fbe3bd2d0', 'mac_address': 'fa:16:3e:5e:9e:db', 'type': 'instance',
+ 'name': 'name-change',
+ 'uuid': '27a87908-bc1b-45cc-9238-09ad1ae686a7', 'environment': ENV_CONFIG,
+ 'ip_address': '192.168.0.4', 'local_name': 'instance-00000020', 'object_name': 'name-change',
+ 'parent_id': 'node-223.cisco.com-instances', 'project_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'name_path': '/'+ENV_CONFIG+'/Regions/RegionOne/Availability Zones' +
+ '/calipso-zone/node-223.cisco.com/Instances/name-change',
+ 'id': '27a87908-bc1b-45cc-9238-09ad1ae686a7',
+ 'id_path': '/'+ENV_CONFIG+'/'+ENV_CONFIG+'-regions/RegionOne/RegionOne-availability_zones/calipso-zone' +
+ '/node-223.cisco.com/node-223.cisco.com-instances/27a87908-bc1b-45cc-9238-09ad1ae686a7',
+ 'show_in_tree': True}
diff --git a/app/test/event_based_scan/test_data/event_payload_interface_add.py b/app/test/event_based_scan/test_data/event_payload_interface_add.py
new file mode 100644
index 0000000..263b010
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_interface_add.py
@@ -0,0 +1,350 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.event_based_scan.config.test_config import ENV_CONFIG
+
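+# sample Neutron 'router.interface.create' notification payload used for
+# the interface-add test data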
+EVENT_PAYLOAD_INTERFACE_ADD = {
+ '_context_timestamp': '2016-10-26 21:52:18.893134', '_context_project_name': 'calipso-project',
+ 'publisher_id': 'network.node-251.cisco.com', 'timestamp': '2016-10-26 21:52:22.377165',
+ '_context_user_name': 'admin',
+ '_context_roles': ['_member_', 'admin'], '_context_tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_unique_id': '44d8a3be1078455b9f73e76cdda9f67a', 'priority': 'INFO',
+ '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9', '_context_user_domain': None,
+ '_context_show_deleted': False,
+ '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_user': '13baa553aae44adca6615e711fd2f6d9',
+ '_context_is_admin': True, 'message_id': 'b81eb79f-f5d2-4bc8-b68e-81650cca1c92', 'payload': {
+ 'router_interface': {'port_id': '1233445-75b6-4c05-9480-4bc648845c6f',
+ 'id': 'c57216ca-c1c4-430d-a045-32851ca879e3',
+ 'subnet_ids': ['6f6ef3b5-76c9-4f70-81e5-f3cc196db025'],
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'subnet_id': '6f6ef3b5-76c9-4f70-81e5-f3cc196db025'}}, '_context_domain': None,
+ '_context_read_only': False, '_context_resource_uuid': None, 'event_type': 'router.interface.create',
+ '_context_request_id': 'req-260fe6fd-0e14-42de-8dbc-acd480015166', '_context_project_domain': None,
+ '_context_tenant_name': 'calipso-project',
+ '_context_auth_token': 'gAAAAABYERgkK8sR80wFsQywjt8vwG0caJW5oxfsWNURcDaYAxy0O6P0u2QQczoMuHBAZa-Ga8T1b3O-5p7p' +
+ 'jw-vAyI1z5whuY7i-hJSl2II6WUX2-9dy7BALQgxhCGpe60atLcyTl-rW6o_TKc3f-ppvqtiul4UTlzH9OtY' +
+ 'N7b-CezaywYDCIMuzGbThPARd9ilQR2B6DuE'}
+
+NETWORK_DOC = {
+ "admin_state_up": True,
+ "cidrs": [
+ "172.16.12.0/24"
+ ],
+ "environment": ENV_CONFIG,
+ "id": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-projects/75c0eb79ff4a42b0ae4973c8375ddf40/75c0eb79ff4a42b0a" +
+ "e4973c8375ddf40-networks/55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "last_scanned": 0,
+ "mtu": 0,
+ "name": "please_connect",
+ "name_path": "/" + ENV_CONFIG + "/Projects/calipso-project/Networks/please_connect",
+ "network": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "object_name": "please_connect",
+ "parent_id": "75c0eb79ff4a42b0ae4973c8375ddf40-networks",
+ "parent_text": "Networks",
+ "parent_type": "networks_folder",
+ "port_security_enabled": True,
+ "project": "calipso-project",
+ "provider:network_type": "vxlan",
+ "provider:physical_network": None,
+ "provider:segmentation_id": 23,
+ "router:external": False,
+ "shared": False,
+ "show_in_tree": True,
+ "status": "ACTIVE",
+ "subnet_ids": [
+ "6f6ef3b5-76c9-4f70-81e5-f3cc196db025"
+ ],
+ "subnets": {
+ "1234": {
+ "cidr": "172.16.12.0/24",
+ "network_id": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "allocation_pools": [
+ {
+ "start": "172.16.12.2",
+ "end": "172.16.12.254"
+ }
+ ],
+ "id": "6f6ef3b5-76c9-4f70-81e5-f3cc196db025",
+ "enable_dhcp": True,
+ "ipv6_address_mode": None,
+ "name": "1234",
+ "host_routes": [
+
+ ],
+ "ipv6_ra_mode": None,
+ "gateway_ip": "172.16.12.1",
+ "ip_version": 4,
+ "subnetpool_id": None,
+ "dns_nameservers": [
+
+ ],
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40"
+ }
+ },
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "network"
+}
+
+EVENT_PAYLOAD_REGION = {
+ 'RegionOne': {
+ 'object_name': 'RegionOne', 'id': 'RegionOne', 'name': 'RegionOne',
+ 'environment': ENV_CONFIG,
+ 'last_scanned': 0,
+ 'name_path': '/' + ENV_CONFIG + '/Regions/RegionOne',
+ 'parent_id': ENV_CONFIG + '-regions', 'parent_type': 'regions_folder',
+ 'endpoints': {'nova': {'id': '274cbbd9fd6d4311b78e78dd3a1df51f',
+ 'adminURL': 'http://192.168.0.2:8774/v2/8c1751e0ce714736a63fee3c776164da',
+ 'service_type': 'compute',
+ 'publicURL': 'http://172.16.0.3:8774/v2/8c1751e0ce714736a63fee3c776164da',
+ 'internalURL': 'http://192.168.0.2:8774/v2/8c1751e0ce714736a63fee3c776164da'},
+ 'heat-cfn': {'id': '0f04ec6ed49f4940822161bf677bdfb2',
+ 'adminURL': 'http://192.168.0.2:8000/v1',
+ 'service_type': 'cloudformation',
+ 'publicURL': 'http://172.16.0.3:8000/v1',
+ 'internalURL': 'http://192.168.0.2:8000/v1'},
+ 'nova_ec2': {'id': '390dddc753cc4d378b489129d06c4b7d',
+ 'adminURL': 'http://192.168.0.2:8773/services/Admin',
+ 'service_type': 'ec2',
+ 'publicURL': 'http://172.16.0.3:8773/services/Cloud',
+ 'internalURL': 'http://192.168.0.2:8773/services/Cloud'},
+ 'glance': {'id': '475c6c77a94e4e63a5a0f0e767f697a8',
+ 'adminURL': 'http://192.168.0.2:9292',
+ 'service_type': 'image',
+ 'publicURL': 'http://172.16.0.3:9292',
+ 'internalURL': 'http://192.168.0.2:9292'},
+ 'swift': {'id': '12e78e06595f48339baebdb5d4309c70',
+ 'adminURL': 'http://192.168.0.2:8080/v1/AUTH_8c1751e0ce714736a63fee3c776164da',
+ 'service_type': 'object-store',
+ 'publicURL': 'http://172.16.0.3:8080/v1/AUTH_8c1751e0ce714736a63fee3c776164da',
+ 'internalURL': 'http://192.168.0.2:8080/v1/AUTH_8c1751e0ce714736a63fee3c776164da'},
+ 'swift_s3': {'id': '4f655c8f2bef46a0a7ba4a20bba53666',
+ 'adminURL': 'http://192.168.0.2:8080',
+ 'service_type': 's3',
+ 'publicURL': 'http://172.16.0.3:8080',
+ 'internalURL': 'http://192.168.0.2:8080'},
+ 'keystone': {'id': '404cceb349614eb39857742970408301',
+ 'adminURL': 'http://192.168.0.2:35357/v2.0',
+ 'service_type': 'identity',
+ 'publicURL': 'http://172.16.0.3:5000/v2.0',
+ 'internalURL': 'http://192.168.0.2:5000/v2.0'},
+ 'cinderv2': {'id': '2c30937688e944889db4a64fab6816e6',
+ 'adminURL': 'http://192.168.0.2:8776/v2/8c1751e0ce714736a63fee3c776164da',
+ 'service_type': 'volumev2',
+ 'publicURL': 'http://172.16.0.3:8776/v2/8c1751e0ce714736a63fee3c776164da',
+ 'internalURL': 'http://192.168.0.2:8776/v2/8c1751e0ce714736a63fee3c776164da'},
+ 'novav3': {'id': '1df917160dfb4ce5b469764fde22b3ab',
+ 'adminURL': 'http://192.168.0.2:8774/v3',
+ 'service_type': 'computev3',
+ 'publicURL': 'http://172.16.0.3:8774/v3',
+ 'internalURL': 'http://192.168.0.2:8774/v3'},
+ 'ceilometer': {'id': '617177a3dcb64560a5a79ab0a91a7225',
+ 'adminURL': 'http://192.168.0.2:8777',
+ 'service_type': 'metering',
+ 'publicURL': 'http://172.16.0.3:8777',
+ 'internalURL': 'http://192.168.0.2:8777'},
+ 'neutron': {'id': '8dc28584da224c4b9671171ead3c982a',
+ 'adminURL': 'http://192.168.0.2:9696',
+ 'service_type': 'network',
+ 'publicURL': 'http://172.16.0.3:9696',
+ 'internalURL': 'http://192.168.0.2:9696'},
+ 'cinder': {'id': '05643f2cf9094265b432376571851841',
+ 'adminURL': 'http://192.168.0.2:8776/v1/8c1751e0ce714736a63fee3c776164da',
+ 'service_type': 'volume',
+ 'publicURL': 'http://172.16.0.3:8776/v1/8c1751e0ce714736a63fee3c776164da',
+ 'internalURL': 'http://192.168.0.2:8776/v1/8c1751e0ce714736a63fee3c776164da'},
+ 'heat': {'id': '9e60268a5aaf422d9e42f0caab0a19b4',
+ 'adminURL': 'http://192.168.0.2:8004/v1/8c1751e0ce714736a63fee3c776164da',
+ 'service_type': 'orchestration',
+ 'publicURL': 'http://172.16.0.3:8004/v1/8c1751e0ce714736a63fee3c776164da',
+ 'internalURL': 'http://192.168.0.2:8004/v1/8c1751e0ce714736a63fee3c776164da'}},
+ 'show_in_tree': True,
+ 'id_path': '/' + ENV_CONFIG + '/' + ENV_CONFIG + '-regions/RegionOne',
+ 'type': 'region'}}
+
+PORT_DOC = {
+ "admin_state_up": True,
+ "allowed_address_pairs": [
+
+ ],
+ "binding:host_id": "",
+ "binding:profile": {
+
+ },
+ "binding:vif_details": {
+
+ },
+ "binding:vif_type": "unbound",
+ "binding:vnic_type": "normal",
+ "device_id": "c57216ca-c1c4-430d-a045-32851ca879e3",
+ "device_owner": "network:router_interface",
+ "dns_assignment": [
+ {
+ "hostname": "host-172-16-10-1",
+ "ip_address": "172.16.10.1",
+ "fqdn": "host-172-16-10-1.openstacklocal."
+ }
+ ],
+ "dns_name": "",
+ "environment": ENV_CONFIG,
+ "extra_dhcp_opts": [
+
+ ],
+ "fixed_ips": [
+ {
+ "ip_address": "172.16.10.1",
+ "subnet_id": "6f6ef3b5-76c9-4f70-81e5-f3cc196db025"
+ }
+ ],
+ "id": "1233445-75b6-4c05-9480-4bc648845c6f",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-projects/75c0eb79ff4a42b0ae4973c8375ddf40/75c0eb79ff4a42b0a" +
+ "e4973c8375ddf40-networks/55550a69-24eb-47f5-a458-3aa086cc71c2/55550a69-24eb-47f5-a458-3aa086cc71c2" +
+ "-ports/1233445-75b6-4c05-9480-4bc648845c6f",
+ "last_scanned": 0,
+ "mac_address": "fa:16:3e:13:b2:aa",
+ "master_parent_id": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "master_parent_type": "network",
+ "name": "fa:16:3e:13:b2:aa",
+ "name_path": "/" + ENV_CONFIG + "/Projects/calipso-project/Networks/test_interface/Ports" +
+ "/1233445-75b6-4c05-9480-4bc648845c6f",
+ "network_id": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "object_name": "1233445-75b6-4c05-9480-4bc648845c6f",
+ "parent_id": "55550a69-24eb-47f5-a458-3aa086cc71c2-ports",
+ "parent_text": "Ports",
+ "parent_type": "ports_folder",
+ "port_security_enabled": False,
+ "project": "calipso-project",
+ "security_groups": [
+
+ ],
+ "status": "DOWN",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "port"
+}
+
+ROUTER_DOCUMENT = {
+ "admin_state_up": True,
+ "enable_snat": 1,
+ "environment": ENV_CONFIG,
+ "gw_port_id": "e2f31c24-d0f9-499e-a8b1-883941543aa4",
+ "host": "node-251.cisco.com",
+ "id": "node-251.cisco.com-qrouter-c57216ca-c1c4-430d-a045-32851ca879e3",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-regions/RegionOne/RegionOne-availability_zones/internal" +
+ "/node-251.cisco.com/node-251.cisco.com-vservices/node-251.cisco.com-vservices-routers/node-251.cisco.com-qrouter-bde87" +
+ "a5a-7968-4f3b-952c-e87681a96078",
+ "last_scanned": 0,
+ "local_service_id": "node-251.cisco.com-qrouter-c57216ca-c1c4-430d-a045-32851ca879e3",
+ "master_parent_id": "node-251.cisco.com-vservices",
+ "master_parent_type": "vservices_folder",
+ "name": "1234",
+ "name_path": "/" + ENV_CONFIG + "/Regions/RegionOne/Availability Zones/internal/node-251.cisco.com/" +
+ "Vservices/Gateways/router-1234",
+ "network": [
+ "55550a69-24eb-47f5-a458-3aa086cc71c2"
+ ],
+ "object_name": "router-1234",
+ "parent_id": "node-251.cisco.com-vservices-routers",
+ "parent_text": "Gateways",
+ "parent_type": "vservice_routers_folder",
+ "service_type": "router",
+ "show_in_tree": True,
+ "status": "ACTIVE",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "vservice"
+}
+
+HOST = {
+ "config": {
+ "use_namespaces": True,
+ "handle_internal_only_routers": True,
+ "ex_gw_ports": 2,
+ "agent_mode": "legacy",
+ "log_agent_heartbeats": False,
+ "floating_ips": 1,
+ "external_network_bridge": "",
+ "router_id": "",
+ "gateway_external_network_id": "",
+ "interface_driver": "neutron.agent.linux.interface.OVSInterfaceDriver",
+ "routers": 2,
+ "interfaces": 4
+ },
+ "environment": ENV_CONFIG,
+ "host": "node-251.cisco.com",
+ "host_type": [
+ "Controller",
+ "Network"
+ ],
+ "id": "node-251.cisco.com",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-regions/RegionOne/RegionOne-availability_zones" +
+ "/internal/node-251.cisco.com",
+ "last_scanned": 0,
+ "name": "node-251.cisco.com",
+ "name_path": "/" + ENV_CONFIG + "/Regions/RegionOne/Availability Zones/internal/node-251.cisco.com",
+ "object_name": "node-251.cisco.com",
+ "parent_id": "internal",
+ "parent_type": "availability_zone",
+ "services": {
+ "nova-conductor": {
+ "available": True,
+ "active": True,
+ "updated_at": "2016-11-08T19:12:08.000000"
+ },
+ "nova-scheduler": {
+ "available": True,
+ "active": True,
+ "updated_at": "2016-11-08T19:12:38.000000"
+ },
+ "nova-cert": {
+ "available": True,
+ "active": True,
+ "updated_at": "2016-11-08T19:12:29.000000"
+ },
+ "nova-consoleauth": {
+ "available": True,
+ "active": True,
+ "updated_at": "2016-11-08T19:12:37.000000"
+ }
+ },
+ "show_in_tree": True,
+ "type": "host",
+ "zone": "internal"
+}
+
+VNIC_DOCS = [{
+ "IP Address": "172.16.10.2",
+ "IPv6 Address": "fe80::f816:3eff:fe96:5066/64",
+ "cidr": "172.16.10.0/25",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:96:50:66\ninet addr:172.16.10.2 Bcast:172.16.10.127 " +
+ "Mask:255.255.255.128\ninet6 addr: fe80::f816:3eff:fe96:5066/64 Scope:Link\nUP BROADCAST RUNNING " +
+ "MULTICAST MTU:1450 Metric:1\nRX packets:17 errors:0 dropped:2 overruns:0 frame:0\nTX packets:8 " +
+ "errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:1593 " +
+ "(1.5 KB) TX bytes:648 (648.0 B)\n",
+ "environment": ENV_CONFIG,
+ "host": "node-251.cisco.com",
+ "id": "tapca33c645-5b",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-regions/RegionOne/RegionOne-availability_zones/internal" +
+ "/node-251.cisco.com/node-251.cisco.com-vservices/node-251.cisco.com-vservices-dhcps/qdhcp-911fe57e" +
+ "-1ddd-4151-9dc7-6b578ab357b1/qdhcp-911fe57e-1ddd-4151-9dc7-6b578ab357b1-vnics/tapca33c645-5b",
+ "last_scanned": 0,
+ "mac_address": "fa:16:3e:13:b2:aa",
+ "name": "tapca33c645-5b",
+ "name_path": "/" + ENV_CONFIG + "/Regions/RegionOne/Availability Zones/internal/node-251.cisco.com/" +
+ "Vservices/DHCP servers/dhcp-test_interface/vNICs/tapca33c645-5b",
+ "netmask": "255.255.255.128",
+ "network": "911fe57e-1ddd-4151-9dc7-6b578ab357b1",
+ "object_name": "tapca33c645-5b",
+ "parent_id": "qdhcp-911fe57e-1ddd-4151-9dc7-6b578ab357b1-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "show_in_tree": True,
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+}]
diff --git a/app/test/event_based_scan/test_data/event_payload_interface_delete.py b/app/test/event_based_scan/test_data/event_payload_interface_delete.py
new file mode 100644
index 0000000..5dbed2c
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_interface_delete.py
@@ -0,0 +1,350 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.event_based_scan.config.test_config import ENV_CONFIG
+
+EVENT_PAYLOAD_INTERFACE_DELETE = {
+ 'message_id': 'da190e5f-127d-4e85-a813-bbdbbb35a2d0', '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_user_domain': None, '_context_resource_uuid': None, '_context_timestamp': '2016-11-07 23:41:04.169781',
+ '_unique_id': 'a4c4ef7a07c9431299047a59d4e0730c', 'event_type': 'router.interface.delete',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_request_id': 'req-e7d61486-2c0a-40cd-b346-1ec884b0e8d0', '_context_is_admin': True,
+ '_context_project_name': 'calipso-project', 'timestamp': '2016-11-07 23:41:09.769453',
+ '_context_auth_token': 'gAAAAABYIQ_M00kY8hGiB6zsD8BWn7b5x0aKIEv1HHQ3Ty0t5MAK68Lm7E6E0pDgJVqBSQsqZsNDqQkn4M0spYlz' +
+ 'WPTEdl3L4d-7ihNmWGOE76J4EtP9tl6nw22ZzdUhu4V8dVB5dqC3lhf61Ot9OsU7XRjp2zfVTb_Ip2yI2_auqUWZ' +
+ 'f6ryWdFPUZqbpJ3-jrVpUP1iXxlT',
+ '_context_roles': ['_member_', 'admin'], '_context_domain': None,
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9', '_context_project_domain': None,
+ '_context_user': '13baa553aae44adca6615e711fd2f6d9', '_context_user_name': 'admin',
+ 'publisher_id': 'network.node-251.cisco.com', '_context_tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_tenant_name': 'calipso-project', 'priority': 'INFO',
+ '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_show_deleted': False,
+ 'payload': {'router_interface': {'port_id': '1233445-75b6-4c05-9480-4bc648845c6f',
+ 'id': 'c57216ca-c1c4-430d-a045-32851ca879e3',
+ 'subnet_ids': ['6f6ef3b5-76c9-4f70-81e5-f3cc196db025'],
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'subnet_id': '6f6ef3b5-76c9-4f70-81e5-f3cc196db025'}},
+ '_context_read_only': False}
+
+NETWORK_DOC = {
+ "admin_state_up": True,
+ "cidrs": [
+ "172.16.12.0/24"
+ ],
+ "environment": ENV_CONFIG,
+ "id": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-projects/75c0eb79ff4a42b0ae4973c8375ddf40/75c0eb79ff4a42b0ae49" +
+ "73c8375ddf40-networks/55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "last_scanned": 0,
+ "mtu": 0,
+ "name": "please_connect",
+ "name_path": "/" + ENV_CONFIG + "/Projects/calipso-project/Networks/please_connect",
+ "network": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "object_name": "please_connect",
+ "parent_id": "75c0eb79ff4a42b0ae4973c8375ddf40-networks",
+ "parent_text": "Networks",
+ "parent_type": "networks_folder",
+ "port_security_enabled": True,
+ "project": "calipso-project",
+ "provider:network_type": "vxlan",
+ "provider:physical_network": None,
+ "provider:segmentation_id": 23,
+ "router:external": False,
+ "shared": False,
+ "show_in_tree": True,
+ "status": "ACTIVE",
+ "subnet_ids": [
+ "6f6ef3b5-76c9-4f70-81e5-f3cc196db025"
+ ],
+ "subnets": {
+ "1234": {
+ "cidr": "172.16.12.0/24",
+ "network_id": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "allocation_pools": [
+ {
+ "start": "172.16.12.2",
+ "end": "172.16.12.254"
+ }
+ ],
+ "id": "6f6ef3b5-76c9-4f70-81e5-f3cc196db025",
+ "enable_dhcp": True,
+ "ipv6_address_mode": None,
+ "name": "1234",
+ "host_routes": [
+
+ ],
+ "ipv6_ra_mode": None,
+ "gateway_ip": "172.16.12.1",
+ "ip_version": 4,
+ "subnetpool_id": None,
+ "dns_nameservers": [
+
+ ],
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40"
+ }
+ },
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "network"
+}
+
+EVENT_PAYLOAD_REGION = {
+ 'RegionOne': {
+ 'object_name': 'RegionOne', 'id': 'RegionOne', 'name': 'RegionOne',
+ 'environment': ENV_CONFIG,
+ 'last_scanned': 0,
+ 'name_path': '/' + ENV_CONFIG + '/Regions/RegionOne',
+ 'parent_id': ENV_CONFIG + '-regions', 'parent_type': 'regions_folder',
+ 'endpoints': {'nova': {'id': '274cbbd9fd6d4311b78e78dd3a1df51f',
+ 'adminURL': 'http://192.168.0.2:8774/v2/8c1751e0ce714736a63fee3c776164da',
+ 'service_type': 'compute',
+ 'publicURL': 'http://172.16.0.3:8774/v2/8c1751e0ce714736a63fee3c776164da',
+ 'internalURL': 'http://192.168.0.2:8774/v2/8c1751e0ce714736a63fee3c776164da'},
+ 'heat-cfn': {'id': '0f04ec6ed49f4940822161bf677bdfb2',
+ 'adminURL': 'http://192.168.0.2:8000/v1',
+ 'service_type': 'cloudformation',
+ 'publicURL': 'http://172.16.0.3:8000/v1',
+ 'internalURL': 'http://192.168.0.2:8000/v1'},
+ 'nova_ec2': {'id': '390dddc753cc4d378b489129d06c4b7d',
+ 'adminURL': 'http://192.168.0.2:8773/services/Admin',
+ 'service_type': 'ec2',
+ 'publicURL': 'http://172.16.0.3:8773/services/Cloud',
+ 'internalURL': 'http://192.168.0.2:8773/services/Cloud'},
+ 'glance': {'id': '475c6c77a94e4e63a5a0f0e767f697a8',
+ 'adminURL': 'http://192.168.0.2:9292',
+ 'service_type': 'image',
+ 'publicURL': 'http://172.16.0.3:9292',
+ 'internalURL': 'http://192.168.0.2:9292'},
+ 'swift': {'id': '12e78e06595f48339baebdb5d4309c70',
+ 'adminURL': 'http://192.168.0.2:8080/v1/AUTH_8c1751e0ce714736a63fee3c776164da',
+ 'service_type': 'object-store',
+ 'publicURL': 'http://172.16.0.3:8080/v1/AUTH_8c1751e0ce714736a63fee3c776164da',
+ 'internalURL': 'http://192.168.0.2:8080/v1/AUTH_8c1751e0ce714736a63fee3c776164da'},
+ 'swift_s3': {'id': '4f655c8f2bef46a0a7ba4a20bba53666',
+ 'adminURL': 'http://192.168.0.2:8080',
+ 'service_type': 's3',
+ 'publicURL': 'http://172.16.0.3:8080',
+ 'internalURL': 'http://192.168.0.2:8080'},
+ 'keystone': {'id': '404cceb349614eb39857742970408301',
+ 'adminURL': 'http://192.168.0.2:35357/v2.0',
+ 'service_type': 'identity',
+ 'publicURL': 'http://172.16.0.3:5000/v2.0',
+ 'internalURL': 'http://192.168.0.2:5000/v2.0'},
+ 'cinderv2': {'id': '2c30937688e944889db4a64fab6816e6',
+ 'adminURL': 'http://192.168.0.2:8776/v2/8c1751e0ce714736a63fee3c776164da',
+ 'service_type': 'volumev2',
+ 'publicURL': 'http://172.16.0.3:8776/v2/8c1751e0ce714736a63fee3c776164da',
+ 'internalURL': 'http://192.168.0.2:8776/v2/8c1751e0ce714736a63fee3c776164da'},
+ 'novav3': {'id': '1df917160dfb4ce5b469764fde22b3ab',
+ 'adminURL': 'http://192.168.0.2:8774/v3',
+ 'service_type': 'computev3',
+ 'publicURL': 'http://172.16.0.3:8774/v3',
+ 'internalURL': 'http://192.168.0.2:8774/v3'},
+ 'ceilometer': {'id': '617177a3dcb64560a5a79ab0a91a7225',
+ 'adminURL': 'http://192.168.0.2:8777',
+ 'service_type': 'metering',
+ 'publicURL': 'http://172.16.0.3:8777',
+ 'internalURL': 'http://192.168.0.2:8777'},
+ 'neutron': {'id': '8dc28584da224c4b9671171ead3c982a',
+ 'adminURL': 'http://192.168.0.2:9696',
+ 'service_type': 'network',
+ 'publicURL': 'http://172.16.0.3:9696',
+ 'internalURL': 'http://192.168.0.2:9696'},
+ 'cinder': {'id': '05643f2cf9094265b432376571851841',
+ 'adminURL': 'http://192.168.0.2:8776/v1/8c1751e0ce714736a63fee3c776164da',
+ 'service_type': 'volume',
+ 'publicURL': 'http://172.16.0.3:8776/v1/8c1751e0ce714736a63fee3c776164da',
+ 'internalURL': 'http://192.168.0.2:8776/v1/8c1751e0ce714736a63fee3c776164da'},
+ 'heat': {'id': '9e60268a5aaf422d9e42f0caab0a19b4',
+ 'adminURL': 'http://192.168.0.2:8004/v1/8c1751e0ce714736a63fee3c776164da',
+ 'service_type': 'orchestration',
+ 'publicURL': 'http://172.16.0.3:8004/v1/8c1751e0ce714736a63fee3c776164da',
+ 'internalURL': 'http://192.168.0.2:8004/v1/8c1751e0ce714736a63fee3c776164da'}},
+ 'show_in_tree': True,
+ 'id_path': '/' + ENV_CONFIG + '/' + ENV_CONFIG + '-regions/RegionOne',
+ 'type': 'region'}}
+
+PORT_DOC = {
+ "admin_state_up": True,
+ "allowed_address_pairs": [
+
+ ],
+ "binding:host_id": "",
+ "binding:profile": {
+
+ },
+ "binding:vif_details": {
+
+ },
+ "binding:vif_type": "unbound",
+ "binding:vnic_type": "normal",
+ "device_id": "c57216ca-c1c4-430d-a045-32851ca879e3",
+ "device_owner": "network:router_interface",
+ "dns_assignment": [
+ {
+ "hostname": "host-172-16-10-1",
+ "ip_address": "172.16.10.1",
+ "fqdn": "host-172-16-10-1.openstacklocal."
+ }
+ ],
+ "dns_name": "",
+ "environment": ENV_CONFIG,
+ "extra_dhcp_opts": [
+
+ ],
+ "fixed_ips": [
+ {
+ "ip_address": "172.16.10.1",
+ "subnet_id": "6f6ef3b5-76c9-4f70-81e5-f3cc196db025"
+ }
+ ],
+ "id": "1233445-75b6-4c05-9480-4bc648845c6f",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "+/-projects/75c0eb79ff4a42b0ae4973c8375ddf40/75c0eb" +
+ "79ff4a42b0ae4973c8375ddf40-networks/55550a69-24eb-47f5-a458-3aa086cc71c2/55550a69-24eb-" +
+ "47f5-a458-3aa086cc71c2-ports/1233445-75b6-4c05-9480-4bc648845c6f",
+ "last_scanned": 0,
+ "mac_address": "fa:16:3e:13:b2:aa",
+ "master_parent_id": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "master_parent_type": "network",
+ "name": "fa:16:3e:13:b2:aa",
+ "name_path": "/" + ENV_CONFIG + "/Projects/calipso-project/Networks/test_interface/Ports/" +
+ "1233445-75b6-4c05-9480-4bc648845c6f",
+ "network_id": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "object_name": "1233445-75b6-4c05-9480-4bc648845c6f",
+ "parent_id": "55550a69-24eb-47f5-a458-3aa086cc71c2-ports",
+ "parent_text": "Ports",
+ "parent_type": "ports_folder",
+ "port_security_enabled": False,
+ "project": "calipso-project",
+ "security_groups": [
+
+ ],
+ "status": "DOWN",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "port"
+}
+
+ROUTER_DOCUMENT = {
+ "admin_state_up": True,
+ "enable_snat": 1,
+ "environment": ENV_CONFIG,
+ "gw_port_id": "e2f31c24-d0f9-499e-a8b1-883941543aa4",
+ "host": "node-251.cisco.com",
+ "id": "node-251.cisco.com-qrouter-c57216ca-c1c4-430d-a045-32851ca879e3",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-regions/RegionOne/RegionOne-availability_zones/internal" +
+ "/node-251.cisco.com/node-251.cisco.com-vservices/node-251.cisco.com-vservices-routers/node-251.cisco.com-qrouter-bde8" +
+ "7a5a-7968-4f3b-952c-e87681a96078",
+ "last_scanned": 0,
+ "local_service_id": "node-251.cisco.com-qrouter-c57216ca-c1c4-430d-a045-32851ca879e3",
+ "master_parent_id": "node-251.cisco.com-vservices",
+ "master_parent_type": "vservices_folder",
+ "name": "1234",
+ "name_path": "/" + ENV_CONFIG + "/Regions/RegionOne/Availability Zones/internal/node-251.cisco.com/" +
+ "Vservices/Gateways/router-1234",
+ "network": [
+ "55550a69-24eb-47f5-a458-3aa086cc71c2"
+ ],
+ "object_name": "router-1234",
+ "parent_id": "node-251.cisco.com-vservices-routers",
+ "parent_text": "Gateways",
+ "parent_type": "vservice_routers_folder",
+ "service_type": "router",
+ "show_in_tree": True,
+ "status": "ACTIVE",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "vservice"
+}
+
+HOST = {
+ "config": {
+ "use_namespaces": True,
+ "handle_internal_only_routers": True,
+ "ex_gw_ports": 2,
+ "agent_mode": "legacy",
+ "log_agent_heartbeats": False,
+ "floating_ips": 1,
+ "external_network_bridge": "",
+ "router_id": "",
+ "gateway_external_network_id": "",
+ "interface_driver": "neutron.agent.linux.interface.OVSInterfaceDriver",
+ "routers": 2,
+ "interfaces": 4
+ },
+ "environment": ENV_CONFIG,
+ "host": "node-251.cisco.com",
+ "host_type": [
+ "Controller",
+ "Network"
+ ],
+ "id": "node-251.cisco.com",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-regions/RegionOne/RegionOne-availability_zones" +
+ "/internal/node-251.cisco.com",
+ "last_scanned": 0,
+ "name": "node-251.cisco.com",
+ "name_path": "/" + ENV_CONFIG + "/Regions/RegionOne/Availability Zones/internal/node-251.cisco.com",
+ "object_name": "node-251.cisco.com",
+ "parent_id": "internal",
+ "parent_type": "availability_zone",
+ "services": {
+ "nova-conductor": {
+ "available": True,
+ "active": True,
+ "updated_at": "2016-11-08T19:12:08.000000"
+ },
+ "nova-scheduler": {
+ "available": True,
+ "active": True,
+ "updated_at": "2016-11-08T19:12:38.000000"
+ },
+ "nova-cert": {
+ "available": True,
+ "active": True,
+ "updated_at": "2016-11-08T19:12:29.000000"
+ },
+ "nova-consoleauth": {
+ "available": True,
+ "active": True,
+ "updated_at": "2016-11-08T19:12:37.000000"
+ }
+ },
+ "show_in_tree": True,
+ "type": "host",
+ "zone": "internal"
+}
+
+VNIC_DOCS = [{
+ "IP Address": "172.16.10.2",
+ "IPv6 Address": "fe80::f816:3eff:fe96:5066/64",
+ "cidr": "172.16.10.0/25",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:96:50:66\ninet addr:172.16.10.2 Bcast:172.16.10.127 " +
+ "Mask:255.255.255.128\ninet6 addr: fe80::f816:3eff:fe96:5066/64 Scope:Link\nUP BROADCAST RUNNING " +
+ "MULTICAST MTU:1450 Metric:1\nRX packets:17 errors:0 dropped:2 overruns:0 frame:0\nTX packets:8 " +
+ "errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:1593 " +
+ "(1.5 KB) TX bytes:648 (648.0 B)\n",
+ "environment": ENV_CONFIG,
+ "host": "node-251.cisco.com",
+ "id": "tapca33c645-5b",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-regions/RegionOne/RegionOne-availability_zones/internal" +
+ "/node-251.cisco.com/node-251.cisco.com-vservices/node-251.cisco.com-vservices-dhcps/qdhcp-911fe57e" +
+ "-1ddd-4151-9dc7-6b578ab357b1/qdhcp-911fe57e-1ddd-4151-9dc7-6b578ab357b1-vnics/tapca33c645-5b",
+ "last_scanned": 0,
+ "mac_address": "fa:16:3e:13:b2:aa",
+ "name": "tapca33c645-5b",
+ "name_path": "/" + ENV_CONFIG + "/Regions/RegionOne/Availability Zones/internal/node-251.cisco.com/" +
+ "Vservices/DHCP servers/dhcp-test_interface/vNICs/tapca33c645-5b",
+ "netmask": "255.255.255.128",
+ "network": "911fe57e-1ddd-4151-9dc7-6b578ab357b1",
+ "object_name": "tapca33c645-5b",
+ "parent_id": "qdhcp-911fe57e-1ddd-4151-9dc7-6b578ab357b1-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "show_in_tree": True,
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+}]
diff --git a/app/test/event_based_scan/test_data/event_payload_network_add.py b/app/test/event_based_scan/test_data/event_payload_network_add.py
new file mode 100644
index 0000000..9630965
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_network_add.py
@@ -0,0 +1,32 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+EVENT_PAYLOAD_NETWORK_ADD = {
+ '_context_request_id': 'req-d8593c49-8424-459b-9ac1-1fd8667310eb', '_context_project_domain': None,
+ '_context_tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_project_name': 'calipso-project',
+ '_context_show_deleted': False, '_context_timestamp': '2016-09-30 17:45:01.738932', '_context_domain': None,
+ '_context_roles': ['_member_', 'admin'], '_context_is_admin': True, 'priority': 'INFO',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ 'message_id': '093e1ee0-87a7-4d40-9303-68d5eaf11f71', '_context_read_only': False,
+ '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_unique_id': '3dc7690856e14066902d861631236297', '_context_resource_uuid': None,
+ '_context_user': '13baa553aae44adca6615e711fd2f6d9', '_context_user_domain': None,
+ 'event_type': 'network.create.end', '_context_user_name': 'admin',
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9',
+ '_context_auth_token': 'gAAAAABX7qQJkXEm4q2dRrVg4gjxYZ4iKWOFdkA4IVWXpDOiDtu_nwtAeSpTP3W0sEJiTjQXgxqCXrhCzi5cZ1edo6'
+ + 'DqEhND8TTtCqknIMwXcGGonUV0TkhKDOEnOgJhQLiV6JG-CtI4x0VnAp6muwankIGNChndH-gP0lw3bdIK29' +
+ 'aqDS4obeXGsYA3oLoORLubgPQjUpdO',
+ 'publisher_id': 'network.node-6.cisco.com', 'timestamp': '2016-09-30 17:45:02.125633',
+ '_context_tenant_name': 'calipso-project',
+ 'payload': {
+ 'network': {'provider:physical_network': None, 'router:external': False, 'shared': False,
+ 'id': 'a8226605-40d0-4111-93bd-11ffa5b2d1d7', 'provider:network_type': 'vxlan',
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', 'mtu': 1400, 'subnets': [], 'status': 'ACTIVE',
+ 'provider:segmentation_id': 8, 'port_security_enabled': True, 'name': 'calipso-network-add',
+ 'admin_state_up': True}}}
diff --git a/app/test/event_based_scan/test_data/event_payload_network_delete.py b/app/test/event_based_scan/test_data/event_payload_network_delete.py
new file mode 100644
index 0000000..6884dd6
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_network_delete.py
@@ -0,0 +1,88 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.event_based_scan.config.test_config import ENV_CONFIG
+
+
+EVENT_PAYLOAD_NETWORK_DELETE = {
+ 'event_type': 'network.delete.end', 'priority': 'INFO', '_context_tenant_name': 'calipso-project',
+ '_context_domain': None, '_context_show_deleted': False, '_unique_id': 'e6f3a44575dd45ea891ec527335a55d7',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_read_only': False, '_context_timestamp': '2016-10-13 23:48:29.632205', '_context_project_domain': None,
+ '_context_roles': ['_member_', 'admin'], '_context_user_domain': None,
+ '_context_request_id': 'req-21307ef4-f4f7-4e8e-afaf-75dd04d71463',
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9', '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'payload': {'network_id': '0bb0ba6c-6863-4121-ac89-93f81a9da2b0'}, '_context_user_name': 'admin',
+ '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_is_admin': True,
+ '_context_resource_uuid': None,
+ 'timestamp': '2016-10-13 23:48:31.609788', '_context_tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'message_id': '7b78b897-7a82-4aab-82a5-b1c431535dce', '_context_project_name': 'calipso-project',
+ '_context_auth_token': 'gAAAAABYAB0cVTm6fzL1S2q2lskw3z7FiEslh_amLhDmDEwQsm3M7L4omSjZ5qKacvgFTXS0HtpbCQfkZn8' +
+ 'BQK80qfbzaQdh05tW1gnboB_FR7vfsUZ1yKUzpDdAgfStDzj_SMWK6FGyZperukjp7Xhmxh91O6cxFvG1' +
+ '0qZmxwtJoKyuW0pCM1593rTsj1Lh6zOIo2iaoC1a',
+ '_context_user': '13baa553aae44adca6615e711fd2f6d9', 'publisher_id': 'network.node-6.cisco.com'}
+
+
+EVENT_PAYLOAD_NETWORK = {
+ "admin_state_up" : True,
+ "cidrs" : [
+ "172.16.9.0/24"
+ ],
+ "environment" : ENV_CONFIG,
+ "id" : '0bb0ba6c-6863-4121-ac89-93f81a9da2b0',
+ "id_path" : '/%s/%s-projects/' % (ENV_CONFIG, ENV_CONFIG) +'75c0eb79ff4a42b0ae4973c8375ddf40/75c0eb79ff4a42b' +
+ '0ae4973c8375ddf40-networks/0bb0ba6c-6863-4121-ac89-93f81a9da2b0' ,
+ "last_scanned" : 0,
+ "mtu" : 1400,
+ "name" : "testnetwork",
+ "name_path" : "/"+ENV_CONFIG+"/Projects/calipso-project/Networks/testnetwork",
+ "network" : "0bb0ba6c-6863-4121-ac89-93f81a9da2b0",
+ "object_name" : "testnetwork",
+ "parent_id" : "75c0eb79ff4a42b0ae4973c8375ddf40-networks",
+ "parent_text" : "Networks",
+ "parent_type" : "networks_folder",
+ "port_security_enabled" : True,
+ "project" : "calipso-project",
+ "provider:network_type" : "vxlan",
+ "provider:physical_network" : None,
+ "provider:segmentation_id" : 107,
+ "router:external" : False,
+ "shared" : False,
+ "show_in_tree" : True,
+ "status" : "ACTIVE",
+ "subnets" : {
+ "testabc" : {
+ "dns_nameservers" : [
+
+ ],
+ "enable_dhcp" : False,
+ "host_routes" : [
+
+ ],
+ "cidr" : "172.16.9.0/24",
+ "ip_version" : 4,
+ "id" : "7a1be27e-4aae-43ef-b3c0-7231a41625b8",
+ "subnetpool_id" : None,
+ "ipv6_ra_mode" : None,
+ "ipv6_address_mode" : None,
+ "network_id" : "0bb0ba6c-6863-4121-ac89-93f81a9da2b0",
+ "tenant_id" : "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "name" : "testabc",
+ "allocation_pools" : [
+ {
+ "end" : "172.16.9.254",
+ "start" : "172.16.9.2"
+ }
+ ],
+ "gateway_ip" : "172.16.9.1"
+ }
+ },
+ "tenant_id" : "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type" : "network"
+} \ No newline at end of file
diff --git a/app/test/event_based_scan/test_data/event_payload_network_update.py b/app/test/event_based_scan/test_data/event_payload_network_update.py
new file mode 100644
index 0000000..3485cd1
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_network_update.py
@@ -0,0 +1,65 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.event_based_scan.config.test_config import ENV_CONFIG
+
+EVENT_PAYLOAD_NETWORK_UPDATE = {
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9', '_context_user': '13baa553aae44adca6615e711fd2f6d9',
+ 'priority': 'INFO',
+ '_context_auth_token': 'gAAAAABYBrNJA6Io1infkUKquvCpC1bAWOCRxKE-8YQ71qLJhli200beztKmlY5ToBHSqFyPvoadkVKjA740jF' +
+ 'bqeY-YtezMHhJAe-t_VyRJQ46IWAv8nPYvWRd_lmgtHrvBeId8NIPCZkhoAEmj5GwcZUZgnFYEhVlUliNO6IfV' +
+ 'Oxcb17Z_1MKfdrfu1AtgD5hWb61w1F6x',
+ '_context_user_name': 'admin', '_context_project_name': 'calipso-project', '_context_domain': None,
+ '_unique_id': 'd1a96723db9341dca6f0d5fb9620f548', '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'message_id': '6b99d060-9cd6-4c14-8a0a-cbfc5c50d122', 'timestamp': '2016-10-18 23:47:31.636433',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_resource_uuid': None, '_context_request_id': 'req-b33cbd49-7af9-4c64-bcfb-782fcd400a5e',
+ 'publisher_id': 'network.node-6.cisco.com', 'payload': {
+ 'network': {'provider:network_type': 'vxlan', 'port_security_enabled': True, 'status': 'ACTIVE',
+ 'id': '8673c48a-f137-4497-b25d-08b7b218fd17', 'shared': False, 'router:external': False,
+ 'subnets': ['fcfa62ec-5ae7-46ce-9259-5f30de7af858'], 'admin_state_up': True,
+ 'provider:segmentation_id': 52, 'provider:physical_network': None, 'name': '24',
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', 'mtu': 1400}}, 'event_type': 'network.update.end',
+ '_context_roles': ['_member_', 'admin'], '_context_project_domain': None, '_context_is_admin': True,
+ '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_show_deleted': False, '_context_user_domain': None,
+ '_context_read_only': False, '_context_timestamp': '2016-10-18 23:47:20.629297',
+ '_context_tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_tenant_name': 'calipso-project'}
+
+
+NETWORK_DOCUMENT = {
+ "admin_state_up" : True,
+ "cidrs" : [
+ "172.16.4.0/24"
+ ],
+ "environment" : ENV_CONFIG,
+ "id" : "8673c48a-f137-4497-b25d-08b7b218fd17",
+ "id_path" : '/%s/%s-projects/' % (ENV_CONFIG, ENV_CONFIG) +'75c0eb79ff4a42b0ae4973c8375ddf40/75c0eb79ff4a42b' +
+ '0ae4973c8375ddf40-networks/8673c48a-f137-4497-b25d-08b7b218fd17',
+ "last_scanned" : 0,
+ "mtu" : 1400,
+ "name" : "calipso-met4",
+ "name_path" : "/"+ENV_CONFIG+"/Projects/calipso-project/Networks/calipso-met4",
+ "network" : "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
+ "object_name" : "calipso-met4",
+ "parent_id" : "75c0eb79ff4a42b0ae4973c8375ddf40-networks",
+ "parent_text" : "Networks",
+ "parent_type" : "networks_folder",
+ "port_security_enabled" : True,
+ "project" : "calipso-project",
+ "provider:network_type" : "vxlan",
+ "provider:physical_network" : None,
+ "provider:segmentation_id" : 0,
+ "router:external" : False,
+ "shared" : False,
+ "show_in_tree" : True,
+ "status" : "ACTIVE",
+ "subnets" : {},
+ "tenant_id" : "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type" : "network"
+} \ No newline at end of file
diff --git a/app/test/event_based_scan/test_data/event_payload_port_add.py b/app/test/event_based_scan/test_data/event_payload_port_add.py
new file mode 100644
index 0000000..92f6d2f
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_port_add.py
@@ -0,0 +1,314 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.event_based_scan.config.test_config import ENV_CONFIG
+
+EVENT_PAYLOAD_PORT_INSTANCE_ADD = {
+ '_context_user_id': '73638a2687534f9794cd8057ba860637', 'payload': {
+ 'port': {'port_security_enabled': True, 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'binding:vif_type': 'ovs',
+ 'mac_address': 'fa:16:3e:04:cd:ab',
+ 'fixed_ips': [{'subnet_id': '9a9c1848-ea23-4c5d-8c40-ae1def4c2de3', 'ip_address': '172.16.13.6'}],
+ 'security_groups': ['2dd5c169-1ff7-40e5-ad96-18924b6d23f1'], 'allowed_address_pairs': [],
+ 'binding:host_id': 'node-223.cisco.com', 'dns_name': '', 'status': 'DOWN',
+ 'id': '1233445-75b6-4c05-9480-4bc648845c6f', 'binding:profile': {}, 'admin_state_up': True,
+ 'device_owner': 'compute:calipso-zone', 'device_id': '27a87908-bc1b-45cc-9238-09ad1ae686a7',
+ 'network_id': '55550a69-24eb-47f5-a458-3aa086cc71c2', 'name': '',
+ 'binding:vif_details': {'ovs_hybrid_plug': True, 'port_filter': True}, 'extra_dhcp_opts': [],
+ 'binding:vnic_type': 'normal'}}, '_context_project_domain': None, 'event_type': 'port.create.end',
+ 'message_id': '2e0da8dc-6d2d-4bde-9e52-c43ec4687864', 'publisher_id': 'network.node-6.cisco.com',
+ '_context_domain': None, '_context_tenant_name': 'services', '_context_tenant': 'a83c8b0d2df24170a7c54f09f824230e',
+ '_context_project_name': 'services', '_context_user': '73638a2687534f9794cd8057ba860637',
+ '_context_user_name': 'neutron', 'priority': 'INFO', '_context_timestamp': '2016-10-24 21:29:52.127098',
+ '_context_read_only': False, '_context_roles': ['admin'], '_context_is_admin': True, '_context_show_deleted': False,
+ '_context_user_domain': None,
+ '_context_auth_token': 'gAAAAABYDnRG3mhPMwyF17iUiIT4nYjtcSktNmmCKlMrUtmpHYsJWl44xU-boIaf4ChWcBsTjl6jOk6Msu7l17As' +
+ '1Y9vFc1rlmKMl86Eknqp0P22RV_Xr6SIobsl6Axl2Z_w-AB1cZ4pSsY4uscxeJdVkoxRb0aC9B7gllrvAgrfO9O' +
+ 'GDqw2ILA',
+ '_context_tenant_id': 'a83c8b0d2df24170a7c54f09f824230e', '_context_resource_uuid': None,
+ '_context_request_id': 'req-3d6810d9-bee9-41b5-a224-7e9641689cc8', '_unique_id': 'b4f1ffae88b342c09658d9ed2829670c',
+ 'timestamp': '2016-10-24 21:29:56.383789', '_context_project_id': 'a83c8b0d2df24170a7c54f09f824230e',
+ '_context_user_identity': '73638a2687534f9794cd8057ba860637 a83c8b0d2df24170a7c54f09f824230e - - -'}
+
+NETWORK_DOC = {
+ "admin_state_up": True,
+ "cidrs": [
+ "172.16.12.0/24"
+ ],
+ "environment": ENV_CONFIG,
+ "id": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-projects/75c0eb79ff4a42b0ae4973c8375ddf40/75c0eb79ff4a42b0ae" +
+ "4973c8375ddf40-networks/55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "last_scanned": 0,
+ "mtu": 0,
+ "name": "please_connect",
+ "name_path": "/" + ENV_CONFIG + "/Projects/calipso-project/Networks/please_connect",
+ "network": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "object_name": "please_connect",
+ "parent_id": "75c0eb79ff4a42b0ae4973c8375ddf40-networks",
+ "parent_text": "Networks",
+ "parent_type": "networks_folder",
+ "port_security_enabled": True,
+ "project": "calipso-project",
+ "provider:network_type": "vxlan",
+ "provider:physical_network": None,
+ "provider:segmentation_id": 23,
+ "router:external": False,
+ "shared": False,
+ "show_in_tree": True,
+ "status": "ACTIVE",
+ "subnet_ids": [
+ "6f6ef3b5-76c9-4f70-81e5-f3cc196db025"
+ ],
+ "subnets": {
+ "1234": {
+ "cidr": "172.16.12.0/24",
+ "network_id": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "allocation_pools": [
+ {
+ "start": "172.16.12.2",
+ "end": "172.16.12.254"
+ }
+ ],
+ "id": "6f6ef3b5-76c9-4f70-81e5-f3cc196db025",
+ "enable_dhcp": True,
+ "ipv6_address_mode": None,
+ "name": "1234",
+ "host_routes": [
+
+ ],
+ "ipv6_ra_mode": None,
+ "gateway_ip": "172.16.12.1",
+ "ip_version": 4,
+ "subnetpool_id": None,
+ "dns_nameservers": [
+
+ ],
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40"
+ }
+ },
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "network"
+}
+
+INSTANCE_DOC = {
+ "environment": ENV_CONFIG,
+ "id": "b2bda4bf-1259-4d60-99ab-85ab4d5014a8",
+ "type": "instance",
+ "uuid": "b2bda4bf-1259-4d60-99ab-85ab4d5014a8",
+ "network": [
+ "a09455d9-399a-4193-9cb4-95e9d8e9a560"
+ ],
+ "local_name": "instance-00000002",
+ 'name_path': '/' + ENV_CONFIG + '/Regions/RegionOne/Availability Zones' +
+ '/calipso-zone/node-223.cisco.com/Instances/name-change',
+ 'id': '27a87908-bc1b-45cc-9238-09ad1ae686a7',
+ 'id_path': '/' + ENV_CONFIG + '/' + ENV_CONFIG + '-regions/RegionOne/RegionOne-availability_zones/calipso-zone' +
+ '/node-223.cisco.com/node-223.cisco.com-instances/27a87908-bc1b-45cc-9238-09ad1ae686a7',
+ "name": "name-change",
+ "network_info": [
+ {
+ "qbg_params": None,
+ "id": "1233445-75b6-4c05-9480-4bc648845c6f",
+ "network": {
+ "bridge": "br-int",
+ "meta": {
+ "injected": False,
+ "tenant_id": "a3efb05cd0484bf0b600e45dab09276d"
+ },
+ "id": "a09455d9-399a-4193-9cb4-95e9d8e9a560",
+ "subnets": [
+ {
+ "gateway": {
+ "meta": {
+
+ },
+ "type": "gateway",
+ "version": 4,
+ "address": "172.16.50.254"
+ },
+ "version": 4,
+ "dns": [
+
+ ],
+ "cidr": "172.16.50.0/24",
+ "routes": [
+
+ ],
+ "meta": {
+ "dhcp_server": "172.16.50.1"
+ },
+ "ips": [
+ {
+ "floating_ips": [
+
+ ],
+ "meta": {
+
+ },
+ "type": "fixed",
+ "version": 4,
+ "address": "172.16.50.3"
+ }
+ ]
+ }
+ ],
+ "label": "calipso-network"
+ },
+ "active": True,
+ "address": "fa:16:3e:04:cd:ab",
+ "vnic_type": "normal",
+ "meta": {
+ },
+ "ovs_interfaceid": "1233445-75b6-4c05-9480-4bc648845c6f",
+ "type": "ovs",
+ "devname": "tapa9a8fa24-11",
+ }
+ ],
+ "host": "node-223.cisco.com",
+ "project_id": "a3efb05cd0484bf0b600e45dab09276d",
+ "object_name": "libertyDD",
+ "parent_id": "node-223.cisco.com-instances",
+ "parent_type": "instances_folder",
+ "projects": [
+ "project-calipso"
+ ],
+ "mac_address": "fa:16:3e:04:cd:ab"
+}
+
+INSTANCES_ROOT = {
+ "create_object": True,
+ "environment": ENV_CONFIG,
+ "id": "node-223.cisco.com-instances",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-regions/RegionOne/RegionOne-availability_zones" +
+ "/calipso-zone/node-223.cisco.com/node-223.cisco.com-instances",
+ "name": "Instances",
+ "name_path": "/" + ENV_CONFIG + "/Regions/RegionOne/Availability Zones/calipso-zone/node-223.cisco.com/Instances",
+ "object_name": "Instances",
+ "parent_id": "node-223.cisco.com",
+ "parent_type": "host",
+ "show_in_tree": True,
+ "text": "Instances",
+ "type": "instances_folder"
+}
+
+INSTANCE_DOCS = [
+ {
+ "environment": ENV_CONFIG,
+ "type": "instance",
+ "uuid": "b2bda4bf-1259-4d60-99ab-85ab4d5014a8",
+ "network": [
+ "a09455d9-399a-4193-9cb4-95e9d8e9a560"
+ ],
+ "local_name": "instance-00000002",
+ 'name_path': '/' + ENV_CONFIG + '/Regions/RegionOne/Availability Zones' +
+ '/calipso-zone/node-223.cisco.com/Instances/name-change',
+ 'id': '27a87908-bc1b-45cc-9238-09ad1ae686a7',
+ 'id_path': '/' + ENV_CONFIG + '/' + ENV_CONFIG + '-regions/RegionOne/RegionOne-availability_zones/calipso-zone' +
+ '/node-223.cisco.com/node-223.cisco.com-instances/27a87908-bc1b-45cc-9238-09ad1ae686a7',
+ "name": "name-change",
+ "network_info": [
+ {
+ "qbg_params": None,
+ "id": "2233445-75b6-4c05-9480-4bc648845c6f",
+ "network": {
+ "bridge": "br-int",
+ "meta": {
+ "injected": False,
+ "tenant_id": "a3efb05cd0484bf0b600e45dab09276d"
+ },
+ "id": "a09455d9-399a-4193-9cb4-95e9d8e9a560",
+ "subnets": [
+ {
+ "gateway": {
+ "meta": {
+
+ },
+ "type": "gateway",
+ "version": 4,
+ "address": "172.16.50.254"
+ },
+ "version": 4,
+ "dns": [
+
+ ],
+ "cidr": "172.16.50.0/24",
+ "routes": [
+
+ ],
+ "meta": {
+ "dhcp_server": "172.16.50.1"
+ },
+ "ips": [
+ {
+ "floating_ips": [
+
+ ],
+ "meta": {
+
+ },
+ "type": "fixed",
+ "version": 4,
+ "address": "172.16.50.3"
+ }
+ ]
+ }
+ ],
+ "label": "calipso-network"
+ },
+ "active": True,
+ "address": "fa:16:3e:04:cd:ab",
+ "vnic_type": "normal",
+ "meta": {
+ },
+ "ovs_interfaceid": "2233445-75b6-4c05-9480-4bc648845c6f",
+ "type": "ovs",
+ "devname": "tapa9a8fa24-12",
+ }
+ ],
+ "host": "node-223.cisco.com",
+ "project_id": "a3efb05cd0484bf0b600e45dab09276d",
+ "object_name": "libertyDD",
+ "parent_id": "node-223.cisco.com-instances",
+ "parent_type": "instances_folder",
+ "projects": [
+ "project-calipso"
+ ],
+ "mac_address": "fa:16:3e:04:cd:ab"
+ }
+]
+
+VNIC_DOCS = [{
+ "IP Address": "172.16.10.2",
+ "IPv6 Address": "fe80::f816:3eff:fe96:5066/64",
+ "cidr": "172.16.10.0/25",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:96:50:66\ninet addr:172.16.10.2 Bcast:172.16.10.127 " +
+ "Mask:255.255.255.128\ninet6 addr: fe80::f816:3eff:fe96:5066/64 Scope:Link\nUP BROADCAST RUNNING " +
+ "MULTICAST MTU:1450 Metric:1\nRX packets:17 errors:0 dropped:2 overruns:0 frame:0\nTX packets:8 " +
+ "errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:1593 " +
+ "(1.5 KB) TX bytes:648 (648.0 B)\n",
+ "host": "node-251.cisco.com",
+ "id": "tapca33c645-5b",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-regions/RegionOne/RegionOne-availability_zones/internal" +
+ "/node-251.cisco.com/node-251.cisco.com-vservices/node-251.cisco.com-vservices-dhcps/qdhcp-911fe57e" +
+ "-1ddd-4151-9dc7-6b578ab357b1/qdhcp-911fe57e-1ddd-4151-9dc7-6b578ab357b1-vnics/tapca33c645-5b",
+ "last_scanned": 0,
+ "mac_address": "fa:16:3e:04:cd:ab",
+ "name": "tapca33c645-5b",
+ "name_path": "/" + ENV_CONFIG + "/Regions/RegionOne/Availability Zones/internal/node-251.cisco.com/" +
+ "Vservices/DHCP servers/dhcp-test_interface/vNICs/tapca33c645-5b",
+ "netmask": "255.255.255.128",
+ "network": "911fe57e-1ddd-4151-9dc7-6b578ab357b1",
+ "object_name": "tapca33c645-5b",
+ "parent_id": "qdhcp-911fe57e-1ddd-4151-9dc7-6b578ab357b1-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "show_in_tree": True,
+ "vnic_type": "instance_vnic"
+}]
diff --git a/app/test/event_based_scan/test_data/event_payload_port_delete.py b/app/test/event_based_scan/test_data/event_payload_port_delete.py
new file mode 100644
index 0000000..afbba32
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_port_delete.py
@@ -0,0 +1,290 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.event_based_scan.config.test_config import ENV_CONFIG
+
+EVENT_PAYLOAD_PORT_DELETE = {
+ '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_tenant_name': 'calipso-project',
+ '_context_project_name': 'calipso-project', '_context_user_id': '13baa553aae44adca6615e711fd2f6d9',
+ '_context_tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_auth_token': 'gAAAAABYIR4eeMzyZ6IWHjWSK9kmr-p4hxRhm5LtDp--kiu5v5MzpnShwkZAbFTkIBR0fC2iaBurXlAvI0pE' +
+ 'myBRpAuxWFsM5rbsiFlo_qpuo_dqIGe6_R7J-MDIGnLCl4T3z3Rb4asZKksXRhP5brkJF1-LdqAXJJ55sgQ' +
+ 'aH-22H9g9Wxhziz5YaoshWskJYhb_geTeqPsa',
+ '_context_show_deleted': False, '_context_read_only': False, '_context_is_admin': True,
+ '_context_timestamp': '2016-11-08 00:58:07.248644',
+ 'payload': {'port_id': '2233445-55b6-4c05-9480-4bc648845c6f'},
+ 'timestamp': '2016-11-08 00:58:07.294731', '_context_user': '13baa553aae44adca6615e711fd2f6d9',
+ 'event_type': 'port.delete.start', '_unique_id': '83a98a31743c4c11aa1d1787037f6683',
+ '_context_request_id': 'req-51f0aeba-2648-436f-9505-0c5efb259146', 'publisher_id': 'network.node-6.cisco.com',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_domain': None, '_context_user_domain': None, 'priority': 'INFO', '_context_user_name': 'admin',
+ '_context_roles': ['_member_', 'admin'], 'message_id': 'ce1e3e9c-e2ef-47e2-99e1-0b6c69e5eeca',
+ '_context_resource_uuid': None, '_context_project_domain': None,
+ '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40'}
+
+PORT_DOC = {
+ "admin_state_up": True,
+ "allowed_address_pairs": [
+
+ ],
+ "binding:host_id": "",
+ "binding:profile": {
+
+ },
+ "binding:vif_details": {
+
+ },
+ "binding:vif_type": "unbound",
+ "binding:vnic_type": "normal",
+ "device_id": "c57216ca-c1c4-430d-a045-32851ca879e3",
+ "device_owner": "compute:nova",
+ "dns_assignment": [
+ {
+ "hostname": "host-172-16-10-1",
+ "ip_address": "172.16.10.1",
+ "fqdn": "host-172-16-10-1.openstacklocal."
+ }
+ ],
+ "dns_name": "",
+ "environment": ENV_CONFIG,
+ "extra_dhcp_opts": [
+
+ ],
+ "fixed_ips": [
+ {
+ "ip_address": "172.16.10.1",
+ "subnet_id": "6f6ef3b5-76c9-4f70-81e5-f3cc196db025"
+ }
+ ],
+ "id": "2233445-55b6-4c05-9480-4bc648845c6f",
+ "id_path": ENV_CONFIG + "/" + ENV_CONFIG + "-projects/75c0eb79ff4a42b0ae4973c8375ddf40/75c0eb79ff4a42b0ae4973c837" +
+ "5ddf40-networks/55550a69-24eb-47f5-a458-3aa086cc71c2/55550a69-24eb-47f5-a458-3aa086cc71c2-ports" +
+ "/2233445-55b6-4c05-9480-4bc648845c6f",
+ "last_scanned": 0,
+ "mac_address": "fa:16:3e:13:b2:aa",
+ "master_parent_id": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "master_parent_type": "network",
+ "name": "fa:16:3e:13:b2:aa",
+ "name_path": "/" + ENV_CONFIG + "/Projects/calipso-project/Networks/test_interface/Ports/" +
+ "2233445-55b6-4c05-9480-4bc648845c6f",
+ "network_id": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "object_name": "2233445-55b6-4c05-9480-4bc648845c6f",
+ "parent_id": "55550a69-24eb-47f5-a458-3aa086cc71c2-ports",
+ "parent_text": "Ports",
+ "parent_type": "ports_folder",
+ "port_security_enabled": False,
+ "project": "calipso-project",
+ "security_groups": [
+
+ ],
+ "status": "DOWN",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "port"
+}
+
+VNIC_DOCS = [{
+ "IP Address": "172.16.10.2",
+ "IPv6 Address": "fe80::f816:3eff:fe96:5066/64",
+ "cidr": "172.16.10.0/25",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:96:50:66\ninet addr:172.16.10.2 Bcast:172.16.10.127 " +
+ "Mask:255.255.255.128\ninet6 addr: fe80::f816:3eff:fe96:5066/64 Scope:Link\nUP BROADCAST RUNNING " +
+ "MULTICAST MTU:1450 Metric:1\nRX packets:17 errors:0 dropped:2 overruns:0 frame:0\nTX packets:8 " +
+ "errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:1593 " +
+ "(1.5 KB) TX bytes:648 (648.0 B)\n",
+ "environment": ENV_CONFIG,
+ "host": "node-251.cisco.com",
+ "id": "tapca33c645-5b",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-regions/RegionOne/RegionOne-availability_zones/internal" +
+ "/node-251.cisco.com/node-251.cisco.com-vservices/node-251.cisco.com-vservices-dhcps/qdhcp-911fe57e-" +
+ "1ddd-4151-9dc7-6b578ab357b1/qdhcp-911fe57e-1ddd-4151-9dc7-6b578ab357b1-vnics/tapca33c645-5b",
+ "last_scanned": 0,
+ "mac_address": "fa:16:3e:13:b2:aa",
+ "name": "tapca33c645-5b",
+ "name_path": "/"+ENV_CONFIG+"/Regions/RegionOne/Availability Zones/internal/node-251.cisco.com/" +
+ "Vservices/DHCP servers/dhcp-test_interface/vNICs/tapca33c645-5b",
+ "netmask": "255.255.255.128",
+ "network": "911fe57e-1ddd-4151-9dc7-6b578ab357b1",
+ "object_name": "tapca33c645-5b",
+ "parent_id": "qdhcp-911fe57e-1ddd-4151-9dc7-6b578ab357b1-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "show_in_tree": True,
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+}]
+
+INSTANCE_DOC = {
+ "environment": ENV_CONFIG,
+ "id": "b2bda4bf-1259-4d60-99ab-85ab4d5014a8",
+ "type": "instance",
+ "uuid": "b2bda4bf-1259-4d60-99ab-85ab4d5014a8",
+ "network": [
+ "55550a69-24eb-47f5-a458-3aa086cc71c2"
+ ],
+ "local_name": "instance-00000002",
+ 'name_path': '/' + ENV_CONFIG + '/Regions/RegionOne/Availability Zones' +
+ '/calipso-zone/node-223.cisco.com/Instances/name-change',
+ 'id': '27a87908-bc1b-45cc-9238-09ad1ae686a7',
+ 'id_path': '/' + ENV_CONFIG + '/' + ENV_CONFIG + '-regions/RegionOne/RegionOne-availability_zones/calipso-zone' +
+ '/node-223.cisco.com/node-223.cisco.com-instances/27a87908-bc1b-45cc-9238-09ad1ae686a7',
+ "name": "name-change",
+ "network_info": [
+ {
+ "qbg_params": None,
+ "id": "2233445-55b6-4c05-9480-4bc648845c6f",
+ "network": {
+ "bridge": "br-int",
+ "meta": {
+ "injected": False,
+ "tenant_id": "a3efb05cd0484bf0b600e45dab09276d"
+ },
+ "id": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "subnets": [
+ {
+ "gateway": {
+ "meta": {
+
+ },
+ "type": "gateway",
+ "version": 4,
+ "address": "172.16.50.254"
+ },
+ "version": 4,
+ "dns": [
+
+ ],
+ "cidr": "172.16.50.0/24",
+ "routes": [
+
+ ],
+ "meta": {
+ "dhcp_server": "172.16.50.1"
+ },
+ "ips": [
+ {
+ "floating_ips": [
+
+ ],
+ "meta": {
+
+ },
+ "type": "fixed",
+ "version": 4,
+ "address": "172.16.50.3"
+ }
+ ]
+ }
+ ],
+ "label": "calipso-network"
+ },
+ "active": True,
+ "address": "fa:16:3e:04:ab:cd",
+ "vnic_type": "normal",
+ "meta": {
+ },
+ "ovs_interfaceid": "2233445-55b6-4c05-9480-4bc648845c6f",
+ "type": "ovs",
+ "devname": "tapa9a8fa24-11",
+ }
+ ],
+ "host": "node-223.cisco.com",
+ "project_id": "a3efb05cd0484bf0b600e45dab09276d",
+ "object_name": "libertyDD",
+ "parent_id": "node-223.cisco.com-instances",
+ "parent_type": "instances_folder",
+ "projects": [
+ "project-calipso"
+ ],
+ "mac_address": "fa:16:3e:13:b2:aa"
+}
+
+INSTANCE_DOCS = [
+ {
+ "environment": ENV_CONFIG,
+ "type": "instance",
+ "uuid": "b2bda4bf-1259-4d60-99ab-85ab4d5014a8",
+ "network": [
+ "55550a69-24eb-47f5-a458-3aa086cc71c2"
+ ],
+ "local_name": "instance-00000002",
+ 'name_path': '/' + ENV_CONFIG + '/Regions/RegionOne/Availability Zones' +
+ '/calipso-zone/node-223.cisco.com/Instances/name-change',
+ 'id': 'c57216ca-c1c4-430d-a045-32851ca879e3',
+ 'id_path': '/' + ENV_CONFIG + '/' + ENV_CONFIG + '-regions/RegionOne/RegionOne-availability_zones/calipso-zone' +
+ '/node-223.cisco.com/node-223.cisco.com-instances/c57216ca-c1c4-430d-a045-32851ca879e3',
+ "name": "name-change",
+ "network_info": [
+ {
+ "qbg_params": None,
+ "id": "2233445-55b6-4c05-9480-4bc648845c6f",
+ "network": {
+ "bridge": "br-int",
+ "meta": {
+ "injected": False,
+ "tenant_id": "a3efb05cd0484bf0b600e45dab09276d"
+ },
+ "id": "55550a69-24eb-47f5-a458-3aa086cc71c2",
+ "subnets": [
+ {
+ "gateway": {
+ "meta": {
+
+ },
+ "type": "gateway",
+ "version": 4,
+ "address": "172.16.50.254"
+ },
+ "version": 4,
+ "dns": [
+
+ ],
+ "cidr": "172.16.50.0/24",
+ "routes": [
+
+ ],
+ "meta": {
+ "dhcp_server": "172.16.50.1"
+ },
+ "ips": [
+ {
+ "floating_ips": [
+
+ ],
+ "meta": {
+
+ },
+ "type": "fixed",
+ "version": 4,
+ "address": "172.16.50.3"
+ }
+ ]
+ }
+ ],
+ "label": "calipso-network"
+ },
+ "active": True,
+ "address": "fa:16:3e:04:ab:cd",
+ "vnic_type": "normal",
+ "meta": {
+ },
+ "ovs_interfaceid": "2233445-75b6-4c05-9480-4bc648845c6f",
+ "type": "ovs",
+ "devname": "tapa9a8fa24-12",
+ }
+ ],
+ "host": "node-223.cisco.com",
+ "project_id": "a3efb05cd0484bf0b600e45dab09276d",
+ "object_name": "libertyDD",
+ "parent_id": "node-223.cisco.com-instances",
+ "parent_type": "instances_folder",
+ "projects": [
+ "project-calipso"
+ ],
+ }
+]
\ No newline at end of file
diff --git a/app/test/event_based_scan/test_data/event_payload_port_update.py b/app/test/event_based_scan/test_data/event_payload_port_update.py
new file mode 100644
index 0000000..90befbf
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_port_update.py
@@ -0,0 +1,103 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.event_based_scan.config.test_config import ENV_CONFIG
+
+EVENT_PAYLOAD_PORT_UPDATE = {
+ '_context_timestamp': '2016-10-25 21:27:05.591848', '_context_user_name': 'admin',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_tenant_name': 'calipso-project', '_context_resource_uuid': None,
+ 'priority': 'INFO', '_context_user': '13baa553aae44adca6615e711fd2f6d9',
+ '_context_domain': None, '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40', 'event_type': 'port.update.end',
+ '_context_project_domain': None, '_context_show_deleted': False,
+ '_context_request_id': 'req-5c502a18-cf79-4903-85c0-84eeab525378',
+ '_context_roles': ['_member_', 'admin'],
+ 'message_id': 'ee8e493e-1134-4077-bb0a-db9d28b625dd', 'payload': {'port': {
+ 'dns_assignment': [
+ {'ip_address': '172.16.4.2', 'fqdn': 'host-172-16-4-2.openstacklocal.', 'hostname': 'host-172-16-4-2'}],
+ 'mac_address': 'fa:16:3e:d7:c5:16', 'security_groups': [], 'admin_state_up': True, 'dns_name': '',
+ 'allowed_address_pairs': [], 'binding:profile': {},
+ 'binding:vif_details': {'port_filter': True, 'ovs_hybrid_plug': True}, 'port_security_enabled': False,
+ 'device_id': 'dhcp7a15cee0-2af1-5441-b1dc-94897ef4dee9-b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe',
+ 'id': '16620a58-c48c-4195-b9c1-779a8ba2e6f8',
+ 'fixed_ips': [{'subnet_id': 'f68b9dd3-4cb5-46aa-96b1-f9c8a7abc3aa', 'ip_address': '172.16.4.2'}],
+ 'name': 'test',
+ 'binding:vnic_type': 'normal', 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', 'extra_dhcp_opts': [],
+ 'network_id': 'b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe', 'binding:vif_type': 'ovs',
+ 'binding:host_id': 'node-6.cisco.com', 'status': 'ACTIVE', 'device_owner': 'network:dhcp'}},
+ 'timestamp': '2016-10-25 21:27:06.281892',
+ '_unique_id': '964361cb7a434daf9fa6452507133fe5',
+ '_context_tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_user_domain': None,
+ '_context_auth_token': 'gAAAAABYD8zsy7c8LhL2SoZTzmK-YpUMFBJHare_RA7_4E94zqj328sC0cETsFAoWoBY' +
+ '6X8ZvjBQg--5UCqgj7iUE-zfIQwZLzXbl46MP1Fg5ZKCUtdCCPN5yqXxGA-ebYlBB_G' +
+ 'If0LUo1YXCKe3GacmfFNC-k0T_B1p340stgLdpW7r0g1jvTDleqK7NWNrnCniZHrgGiLw',
+ '_context_is_admin': True, '_context_read_only': False,
+ '_context_project_name': 'calipso-project',
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9',
+ 'publisher_id': 'network.node-6.cisco.com'}
+
+PORT_DOCUMENT = {
+ "admin_state_up": True,
+ "allowed_address_pairs": [
+
+ ],
+ "binding:host_id": "node-6.cisco.com",
+ "binding:profile": {
+
+ },
+ "binding:vif_details": {
+ "port_filter": True,
+ "ovs_hybrid_plug": True
+ },
+ "binding:vif_type": "ovs",
+ "binding:vnic_type": "normal",
+ "device_id": "dhcp7a15cee0-2af1-5441-b1dc-94897ef4dee9-b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
+ "device_owner": "network:dhcp",
+ "dns_assignment": [
+ {
+ "hostname": "host-172-16-4-2",
+ "fqdn": "host-172-16-4-2.openstacklocal.",
+ "ip_address": "172.16.4.2"
+ }
+ ],
+ "dns_name": "",
+ "environment": ENV_CONFIG,
+ "extra_dhcp_opts": [
+
+ ],
+ "fixed_ips": [
+ {
+ "subnet_id": "f68b9dd3-4cb5-46aa-96b1-f9c8a7abc3aa",
+ "ip_address": "172.16.4.2"
+ }
+ ],
+ "id": "16620a58-c48c-4195-b9c1-779a8ba2e6f8",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-projects/75c0eb79ff4a42b0ae4973c8375ddf40/75c0eb" +
+ "79ff4a42b0ae4973c8375ddf40-networks/b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe/b6fd5175-4b22" +
+ "-4256-9b1a-9fc4b9dce1fe-ports/16620a58-c48c-4195-b9c1-779a8ba2e6f8",
+ "last_scanned": 0,
+ "mac_address": "fa:16:3e:d7:c5:16",
+ "name": "123",
+ "name_path": "/"+ENV_CONFIG+"/Projects/calipso-project/Networks/calipso-met4/Ports/fa:16:3e:d7:c5:16",
+ "network_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
+ "object_name": "fa:16:3e:d7:c5:16",
+ "parent_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe-ports",
+ "parent_text": "Ports",
+ "parent_type": "ports_folder",
+ "port_security_enabled": False,
+ "project": "calipso-project",
+ "security_groups": [
+
+ ],
+ "show_in_tree": True,
+ "status": "ACTIVE",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "port"
+}
diff --git a/app/test/event_based_scan/test_data/event_payload_router_add.py b/app/test/event_based_scan/test_data/event_payload_router_add.py
new file mode 100644
index 0000000..153538d
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_router_add.py
@@ -0,0 +1,176 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import datetime
+
+from test.event_based_scan.config.test_config import ENV_CONFIG
+
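+# Sample Neutron 'router.create.end' notification used by the router add test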
+EVENT_PAYLOAD_ROUTER_ADD = {
+ '_context_show_deleted': False, '_context_domain': None,
+ '_context_user': '13baa553aae44adca6615e711fd2f6d9',
+ 'message_id': '05682485-9283-4cef-aae5-0bc1e86ed14d',
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9', '_context_read_only': False,
+ '_context_user_domain': None, '_unique_id': '40f10fd246814669b61d906fd71be301',
+ '_context_auth_token': 'gAAAAABYE58-bIhpHKOyCNnav0czsBonPJbJJPxtkTFHT_gJ-sVPPO1xCldKOoJoJ58M5egmK0' +
+ 'tsCOiH9N6u-2h08rH84nrnE6YUoLJM_SWyJlbYDzH7rJyHYPBVE1aYkzMceiy7Jr33G4k6cGZQ' +
+ '7UzAaZRrGLxMMFddvNZa47dVPZsg1oJpdIVVcoaRHf4hPM8lj1qSn6WG',
+ 'event_type': 'router.create.end', '_context_tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'priority': 'INFO', '_context_roles': ['_member_', 'admin'],
+ '_context_project_name': 'calipso-project',
+ '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_request_id': 'req-a543a2e4-3160-4e98-b1b8-21a876fff205',
+ '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'timestamp': '2016-10-28 19:00:36.600958',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_tenant_name': 'calipso-project', 'payload': {
+ 'router': {'name': 'test-router-add',
+ 'external_gateway_info': {
+ 'enable_snat': True,
+ 'external_fixed_ips': [{
+ 'ip_address': '172.16.0.137',
+ 'subnet_id': 'a5336853-cbc0-49e8-8401-a093e8bab7bb'}],
+ 'network_id': 'c64adb76-ad9d-4605-9f5e-123456781234'},
+ 'admin_state_up': True,
+ 'distributed': False,
+ 'routes': [], 'ha': False,
+ 'id': 'c485d5f4-dfec-430f-8ad8-409c7034b46d',
+ 'status': 'ACTIVE',
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40'}},
+ '_context_timestamp': '2016-10-28 19:00:34.395521', '_context_project_domain': None,
+ 'publisher_id': 'network.node-250.cisco.com', '_context_is_admin': True,
+ '_context_user_name': 'admin', '_context_resource_uuid': None}
+
+ROUTER_DOCUMENT = {'host': 'node-250.cisco.com', 'service_type': 'router', 'name': 'router-test-router-add',
+ 'id': 'node-250.cisco.com-qrouter-c485d5f4-dfec-430f-8ad8-409c7034b46d',
+ 'local_service_id': 'node-250.cisco.com-qrouter-c485d5f4-dfec-430f-8ad8-409c7034b46d',
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', 'status': 'ACTIVE',
+ 'master_parent_type': 'vservices_folder',
+ 'admin_state_up': 1, 'parent_type': 'vservice_routers_folder', 'enable_snat': 1,
+ 'parent_text': 'Gateways',
+ 'gw_port_id': 'e2f31c24-d0f9-499e-a8b1-883941543aa4',
+ 'master_parent_id': 'node-250.cisco.com-vservices',
+ 'parent_id': 'node-250.cisco.com-vservices-routers'}
+
+HOST_DOC = {
+ "config": {
+ "gateway_external_network_id": "",
+ "router_id": "",
+ "handle_internal_only_routers": True,
+ "agent_mode": "legacy",
+ "ex_gw_ports": 4,
+ "floating_ips": 1,
+ "external_network_bridge": "",
+ "interfaces": 1,
+ "log_agent_heartbeats": False,
+ "use_namespaces": True,
+ "interface_driver": "neutron.agent.linux.interface.OVSInterfaceDriver",
+ "routers": 4
+ },
+ "environment": ENV_CONFIG,
+ "host": "node-250.cisco.com",
+ "host_type": [
+ "Controller",
+ "Network"
+ ],
+ "id": "node-250.cisco.com",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-regions/RegionOne/RegionOne-availability_zones" +
+ "/internal/node-250.cisco.com",
+ "last_scanned": datetime.datetime.utcnow(),
+ "name": "node-250.cisco.com",
+ "name_path": "/" + ENV_CONFIG + "/Regions/RegionOne/Availability Zones/internal/node-250.cisco.com",
+ "object_name": "node-250.cisco.com",
+ "parent_id": "internal",
+ "parent_type": "availability_zone",
+ "services": {
+ "nova-scheduler": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-11-02T21:19:47.000000"
+ },
+ "nova-consoleauth": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-11-02T21:19:48.000000"
+ },
+ "nova-cert": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-11-02T21:19:41.000000"
+ },
+ "nova-conductor": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-11-02T21:19:52.000000"
+ }
+ },
+ "show_in_tree": True,
+ "type": "host",
+ "zone": "internal"
+}
+
+NETWORK_DOC = {
+ "admin_state_up": True,
+ "cidrs": [
+ "172.16.0.0/24"
+ ],
+ "environment": ENV_CONFIG,
+ "id": "c64adb76-ad9d-4605-9f5e-123456781234",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-projects/8c1751e0ce714736a63fee3c776164da/8c1751e0ce71" +
+ "4736a63fee3c776164da-networks/c64adb76-ad9d-4605-9f5e-123456781234",
+ "last_scanned": datetime.datetime.utcnow(),
+ "mtu": 1500,
+ "name": "admin_floating_net",
+ "name_path": "/" + ENV_CONFIG + "/Projects/admin/Networks/admin_floating_net",
+ "network": "c64adb76-ad9d-4605-9f5e-123456781234",
+ "object_name": "admin_floating_net",
+ "parent_id": "8c1751e0ce714736a63fee3c776164da-networks",
+ "parent_text": "Networks",
+ "parent_type": "networks_folder",
+ "port_security_enabled": True,
+ "project": "admin",
+ "provider:network_type": "flat",
+ "provider:physical_network": "physnet1",
+ "provider:segmentation_id": None,
+ "router:external": True,
+ "shared": False,
+ "show_in_tree": True,
+ "status": "ACTIVE",
+ "subnets": {
+ "admin_floating_net__subnet": {
+ "allocation_pools": [
+ {
+ "end": "172.16.0.254",
+ "start": "172.16.0.130"
+ }
+ ],
+ "id": "a5336853-cbc0-49e8-8401-a093e8bab7bb",
+ "network_id": "c64adb76-ad9d-4605-9f5e-123456781234",
+ "ipv6_ra_mode": None,
+ "ipv6_address_mode": None,
+ "ip_version": 4,
+ "tenant_id": "8c1751e0ce714736a63fee3c776164da",
+ "cidr": "172.16.0.0/24",
+ "dns_nameservers": [
+
+ ],
+ "name": "admin_floating_net__subnet",
+ "subnetpool_id": None,
+ "gateway_ip": "172.16.0.1",
+ "host_routes": [
+
+ ],
+ "enable_dhcp": False,
+ }
+ },
+ "subnets_id": [
+ "a5336853-cbc0-49e8-8401-a093e8bab7bb"
+ ],
+ "tenant_id": "8c1751e0ce714736a63fee3c776164da",
+ "type": "network"
+}
diff --git a/app/test/event_based_scan/test_data/event_payload_router_delete.py b/app/test/event_based_scan/test_data/event_payload_router_delete.py
new file mode 100644
index 0000000..8ab8cc3
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_router_delete.py
@@ -0,0 +1,59 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.event_based_scan.config.test_config import ENV_CONFIG
+
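+# Sample Neutron 'router.delete.end' notification used by the router delete test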
+EVENT_PAYLOAD_ROUTER_DELETE = {
+ '_context_request_id': 'req-8b2dd9ba-5faa-4471-94c3-fb41781eef8d', '_unique_id': 'c7417f771ee74bb19036b06e685c93dc',
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9', '_context_user': '13baa553aae44adca6615e711fd2f6d9',
+ '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_auth_token': 'gAAAAABYE7T7789XjB_Nir9PykWTIpDNI0VhgtVQJNyGVImHnug2AVRX9e2JDcXe8F73eNmFepASsoCfqvZet9qN' +
+ '38vrX6GqzL89Quf6pQyLxgRorMv6RlScSCDBQzE8Hj5szSYi_a7F_O2Lr77omUiLi2R_Ludt25mcMiuaMgPknJ2b' +
+ 'joAyV_-eE_8CrSbdJ5Dk1MaCSq5K',
+ '_context_user_name': 'admin',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_timestamp': '2016-10-28 20:31:27.902723', 'message_id': '569118ad-1f5b-4a50-96ec-f160ebbb1b34',
+ 'payload': {'router_id': 'bde87a5a-7968-4f3b-952c-e87681a96078'}, '_context_resource_uuid': None,
+ 'event_type': 'router.delete.end', '_context_project_name': 'calipso-project', 'priority': 'INFO',
+ '_context_tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_roles': ['_member_', 'admin'],
+ '_context_project_domain': None, '_context_user_domain': None, '_context_read_only': False,
+ '_context_is_admin': True, '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_domain': None,
+ '_context_show_deleted': False, '_context_tenant_name': 'calipso-project', 'publisher_id': 'network.node-6.cisco.com',
+ 'timestamp': '2016-10-28 20:31:37.012032'}
+
+ROUTER_DOCUMENT = {
+ "admin_state_up": True,
+ "enable_snat": 1,
+ "environment": ENV_CONFIG,
+ "gw_port_id": None,
+ "host": "node-6.cisco.com",
+ "id": "node-6.cisco.com-qrouter-bde87a5a-7968-4f3b-952c-e87681a96078",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-regions/RegionOne/RegionOne-availability_zones/internal" +
+ "/node-6.cisco.com/node-6.cisco.com-vservices/node-6.cisco.com-vservices-routers/qrouter-bde87a5a" +
+ "-7968-4f3b-952c-e87681a96078",
+ "last_scanned": 0,
+ "local_service_id": "node-6.cisco.com-qrouter-bde87a5a-7968-4f3b-952c-e87681a96078",
+ "master_parent_id": "node-6.cisco.com-vservices",
+ "master_parent_type": "vservices_folder",
+ "name": "1234",
+ "name_path": "/" + ENV_CONFIG + "/Regions/RegionOne/Availability Zones/internal/node-6.cisco.com/" +
+ "Vservices/Gateways/router-1234",
+ "network": [
+ "c64adb76-ad9d-4605-9f5e-bd6dbe325cfb"
+ ],
+ "object_name": "router-1234",
+ "parent_id": "node-6.cisco.com-vservices-routers",
+ "parent_text": "Gateways",
+ "parent_type": "vservice_routers_folder",
+ "service_type": "router",
+ "show_in_tree": True,
+ "status": "ACTIVE",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "vservice"
+}
diff --git a/app/test/event_based_scan/test_data/event_payload_router_update.py b/app/test/event_based_scan/test_data/event_payload_router_update.py
new file mode 100644
index 0000000..b0a917e
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_router_update.py
@@ -0,0 +1,271 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.event_based_scan.config.test_config import ENV_CONFIG
+
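+# Sample Neutron 'router.update.end' notification used by the router update test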
+EVENT_PAYLOAD_ROUTER_UPDATE = {
+ '_context_request_id': 'req-da45908c-0765-4f8a-9fac-79246901de41', '_unique_id': '80723cc09a4748c6b13214dcb867719e',
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9', '_context_user': '13baa553aae44adca6615e711fd2f6d9',
+ '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_auth_token': 'gAAAAABYE7T7789XjB_Nir9PykWTIpDNI0VhgtVQJNyGVImHnug2AVRX9e2JDcXe8F73eNmFepASsoCfqvZet9q' +
+ 'N38vrX6GqzL89Quf6pQyLxgRorMv6RlScSCDBQzE8Hj5szSYi_a7F_O2Lr77omUiLi2R_Ludt25mcMiuaMgPkn' +
+ 'J2bjoAyV_-eE_8CrSbdJ5Dk1MaCSq5K',
+ '_context_user_name': 'admin',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_timestamp': '2016-10-28 20:29:35.548123', 'message_id': '42c0ca64-cea1-4c89-a059-72abf7990c40',
+ 'payload': {
+ 'router': {'id': 'bde87a5a-7968-4f3b-952c-e87681a96078', 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'ha': False, 'distributed': False, 'name': 'abc', 'status': 'ACTIVE', 'external_gateway_info': None,
+ 'admin_state_up': True, 'routes': []}}, '_context_resource_uuid': None,
+ 'event_type': 'router.update.end', '_context_project_name': 'calipso-project', 'priority': 'INFO',
+ '_context_tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_roles': ['_member_', 'admin'],
+ '_context_project_domain': None, '_context_user_domain': None, '_context_read_only': False,
+ '_context_is_admin': True, '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_domain': None,
+ '_context_show_deleted': False, '_context_tenant_name': 'calipso-project', 'publisher_id': 'network.node-250.cisco.com',
+ 'timestamp': '2016-10-28 20:29:39.986161'}
+
+ROUTER_VSERVICE = {'host': 'node-250.cisco.com', 'service_type': 'router', 'name': '1234',
+ 'id': 'node-250.cisco.com-qrouter-bde87a5a-7968-4f3b-952c-e87681a96078',
+ 'local_service_id': 'node-250.cisco.com-qrouter-bde87a5a-7968-4f3b-952c-e87681a96078',
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', 'status': 'ACTIVE',
+ 'master_parent_type': 'vservices_folder',
+ 'admin_state_up': 1, 'parent_type': 'vservice_routers_folder', 'enable_snat': 1,
+ 'parent_text': 'Gateways',
+ 'gw_port_id': 'e2f31c24-d0f9-499e-a8b1-883941543aa4',
+ 'master_parent_id': 'node-250.cisco.com-vservices',
+ 'parent_id': 'node-250.cisco.com-vservices-routers'}
+
+ROUTER_DOCUMENT = {
+ "admin_state_up": True,
+ "enable_snat": 1,
+ "environment": ENV_CONFIG,
+ "gw_port_id": "e2f31c24-d0f9-499e-a8b1-883941543aa4",
+ "host": "node-250.cisco.com",
+ "id": "node-250.cisco.com-qrouter-bde87a5a-7968-4f3b-952c-e87681a96078",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-regions/RegionOne/RegionOne-availability_zones/internal" +
+ "/node-250.cisco.com/node-250.cisco.com-vservices/node-250.cisco.com-vservices-routers/qrouter-bde87a5a" +
+ "-7968-4f3b-952c-e87681a96078",
+ "last_scanned": 0,
+ "local_service_id": "node-250.cisco.com-qrouter-bde87a5a-7968-4f3b-952c-e87681a96078",
+ "master_parent_id": "node-250.cisco.com-vservices",
+ "master_parent_type": "vservices_folder",
+ "name": "1234",
+ "name_path": "/" + ENV_CONFIG + "/Regions/RegionOne/Availability Zones/internal/node-250.cisco.com/" +
+ "Vservices/Gateways/router-1234",
+ "network": [
+ "a55ff1e8-3821-4e5f-bcfd-07df93720a4f"
+ ],
+ "object_name": "router-1234",
+ "parent_id": "node-250.cisco.com-vservices-routers",
+ "parent_text": "Gateways",
+ "parent_type": "vservice_routers_folder",
+ "service_type": "router",
+ "show_in_tree": True,
+ "status": "ACTIVE",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "vservice"
+}
+
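+# 'router.update.end' notification that sets an external gateway on the router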
+EVENT_PAYLOAD_ROUTER_SET_GATEWAY = {
+ 'publisher_id': 'network.node-250.cisco.com',
+ '_context_request_id': 'req-79d53b65-47b8-46b2-9a72-3f4031e2d605',
+ '_context_project_name': 'calipso-project', '_context_show_deleted': False,
+ '_context_user_name': 'admin', '_context_timestamp': '2016-11-02 21:44:31.156447',
+ '_context_user': '13baa553aae44adca6615e711fd2f6d9', 'payload': {
+ 'router': {'id': 'bde87a5a-7968-4f3b-952c-e87681a96078', 'admin_state_up': True, 'routes': [],
+ 'status': 'ACTIVE', 'ha': False, 'name': 'test_namespace', 'distributed': False,
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', 'external_gateway_info': {'external_fixed_ips': [
+ {'ip_address': '172.16.0.144', 'subnet_id': 'a5336853-cbc0-49e8-8401-a093e8bab7bb'}],
+ 'network_id': 'a55ff1e8-3821-4e5f-bcfd-07df93720a4f',
+ 'enable_snat': True}}},
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9', '_context_read_only': False,
+ '_context_auth_token': 'gAAAAABYGlU6mEqntx5E9Nss203DIKH352JKSZP0RsJrAJQ_PfjyZEAzYcFvMh4FYVRDRWLvu0cSDsvUk1ILu' +
+ 'nHkpNF28pwcvkBgVModV2Xd2_BW2QbBa2csCOXYiN0LE2uOo3BkrLDEcblvJVT0XTJdDhrBldfyCH0_xSfJ7_' +
+ 'wzdy8bB34HwHq2w0S3Okp8Tk_Zx_-xpIqB',
+ 'priority': 'INFO', 'timestamp': '2016-11-02 21:44:35.627776',
+ '_context_roles': ['_member_', 'admin'], '_context_resource_uuid': None,
+ '_context_user_domain': None, '_context_project_domain': None,
+ '_context_tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ 'message_id': '71889925-14ce-40c3-a3dc-f26731b10b26',
+ 'event_type': 'router.update.end',
+ '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_unique_id': '9e6ab72c5901451f81748e0aa654ae25',
+ '_context_tenant_name': 'calipso-project', '_context_is_admin': True,
+ '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_domain': None}
+
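+# 'router.update.end' notification that clears the router's external gateway (external_gateway_info is None)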
+EVENT_PAYLOAD_ROUTER_DEL_GATEWAY = {
+ '_context_show_deleted': False, '_context_timestamp': '2016-11-03 18:48:40.420170', '_context_read_only': False,
+ 'publisher_id': 'network.node-250.cisco.com',
+ '_context_auth_token': 'gAAAAABYG4UUGbe9bykUJUPY0lKye578aF0RrMCc7nA21eLbhpwcsh5pWWqz6hnOi7suUCUtr1DPTbqF1M8CVJ' +
+ '9FT2EevbqiahcyphrV2VbmP5_tebOcIHIPJ_f_K3KYJM1C6zgcWgdf9KFu_8t_G99wd1MwWBrZyUUElXgSNv48' +
+ 'W4uaCKcbYclnZW78lgXVik5x6WLT_j5V',
+ '_context_user_name': 'admin',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9', '_unique_id': '266f2bb0ab2c4a328ae0759d01b0035b',
+ 'timestamp': '2016-11-03 18:48:41.634214', '_context_roles': ['_member_', 'admin'],
+ 'event_type': 'router.update.end',
+ '_context_user_domain': None, '_context_user': '13baa553aae44adca6615e711fd2f6d9', '_context_is_admin': True,
+ '_context_tenant_name': 'calipso-project', '_context_project_domain': None, '_context_domain': None,
+ 'priority': 'INFO',
+ '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40', 'message_id': '5272cd90-7151-4d13-8c1f-e8ff2db773a1',
+ '_context_project_name': 'calipso-project', '_context_resource_uuid': None, 'payload': {
+ 'router': {'id': 'bde87a5a-7968-4f3b-952c-e87681a96078', 'external_gateway_info': None, 'distributed': False,
+ 'name': 'TEST_AAA', 'routes': [], 'ha': False, 'admin_state_up': True,
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', 'status': 'ACTIVE'}},
+ '_context_request_id': 'req-d7e73189-4709-4234-8b4c-fb6b4dc2017b'}
+
+PORTS = {
+ "admin_state_up": True,
+ "allowed_address_pairs": [
+
+ ],
+ "binding:host_id": "node-250.cisco.com",
+ "binding:profile": {
+
+ },
+ "binding:vif_details": {
+ "port_filter": True,
+ "ovs_hybrid_plug": True
+ },
+ "binding:vif_type": "ovs",
+ "binding:vnic_type": "normal",
+ "device_id": "9ec3d703-0725-47e3-8f48-02b16236caf9",
+ "device_owner": "network:router_interface",
+ "dns_assignment": [
+ {
+ "hostname": "host-172-16-1-1",
+ "fqdn": "host-172-16-1-1.openstacklocal.",
+ "ip_address": "172.16.1.1"
+ }
+ ],
+ "dns_name": "",
+ "environment": ENV_CONFIG,
+ "extra_dhcp_opts": [
+
+ ],
+ "fixed_ips": [
+ {
+ "subnet_id": "c1287696-224b-4a72-9f1d-d45176671bce",
+ "ip_address": "172.16.1.1"
+ }
+ ],
+ "id": "e2f31c24-d0f9-499e-a8b1-883941543aa4",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-projects/75c0eb79ff4a42b0ae4973c8375ddf40/75c0eb79ff4a42b" +
+ "0ae4973c8375ddf40-networks/a55ff1e8-3821-4e5f-bcfd-07df93720a4f/a55ff1e8-3821-4e5f-bcfd-07df93720a4" +
+ "f-ports/e2f31c24-d0f9-499e-a8b1-883941543aa4",
+ "last_scanned": 0,
+ "mac_address": "fa:16:3e:ee:9a:46",
+ "name": "fa:16:3e:ee:9a:46",
+ "name_path": "/" + ENV_CONFIG + "/Projects/calipso-project/Networks/calipso-net2/Ports/fa:16:3e:ee:9a:46",
+ "network_id": "a55ff1e8-3821-4e5f-bcfd-07df93720a4f",
+ "object_name": "fa:16:3e:ee:9a:46",
+ "parent_id": "a55ff1e8-3821-4e5f-bcfd-07df93720a4f-ports",
+ "parent_text": "Ports",
+ "parent_type": "ports_folder",
+ "port_security_enabled": False,
+ "project": "calipso-project",
+ "security_groups": [
+
+ ],
+ "show_in_tree": True,
+ "status": "ACTIVE",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "port"
+}
+
+NETWORK_DOC = {
+ "admin_state_up": True,
+ "cidrs": [
+ "172.16.4.0/24"
+ ],
+ "environment": ENV_CONFIG,
+ "id": "a55ff1e8-3821-4e5f-bcfd-07df93720a4f",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-projects/75c0eb79ff4a42b0ae4973c8375ddf40/75c0eb79ff4a42b" +
+ "0ae4973c8375ddf40-networks/a55ff1e8-3821-4e5f-bcfd-07df93720a4f",
+ "last_scanned": 0,
+ "mtu": 1400,
+ "name": "calipso-net2",
+ "name_path": "/" + ENV_CONFIG + "/Projects/calipso-project/Networks/calipso-net2",
+ "network": "a55ff1e8-3821-4e5f-bcfd-07df93720a4f",
+ "object_name": "calipso-net2",
+ "parent_id": "75c0eb79ff4a42b0ae4973c8375ddf40-networks",
+ "parent_text": "Networks",
+ "parent_type": "networks_folder",
+ "port_security_enabled": True,
+ "project": "calipso-project",
+ "provider:network_type": "vxlan",
+ "provider:physical_network": None,
+ "provider:segmentation_id": 0,
+ "router:external": False,
+ "shared": False,
+ "show_in_tree": True,
+ "status": "ACTIVE",
+ "subnets": {},
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "network"
+}
+
+HOST_DOC = {
+ "config": {
+ "gateway_external_network_id": "",
+ "router_id": "",
+ "handle_internal_only_routers": True,
+ "agent_mode": "legacy",
+ "ex_gw_ports": 4,
+ "floating_ips": 1,
+ "external_network_bridge": "",
+ "interfaces": 1,
+ "log_agent_heartbeats": False,
+ "use_namespaces": True,
+ "interface_driver": "neutron.agent.linux.interface.OVSInterfaceDriver",
+ "routers": 4
+ },
+ "environment": ENV_CONFIG,
+ "host": "node-250.cisco.com",
+ "host_type": [
+ "Controller",
+ "Network"
+ ],
+ "id": "node-250.cisco.com",
+ "id_path": "/" + ENV_CONFIG + "/" + ENV_CONFIG + "-regions/RegionOne/RegionOne-availability_zones" +
+ "/internal/node-250.cisco.com",
+ "last_scanned": 0,
+ "name": "node-250.cisco.com",
+ "name_path": "/" + ENV_CONFIG + "/Regions/RegionOne/Availability Zones/internal/node-250.cisco.com",
+ "object_name": "node-250.cisco.com",
+ "parent_id": "internal",
+ "parent_type": "availability_zone",
+ "services": {
+ "nova-scheduler": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-11-02T21:19:47.000000"
+ },
+ "nova-consoleauth": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-11-02T21:19:48.000000"
+ },
+ "nova-cert": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-11-02T21:19:41.000000"
+ },
+ "nova-conductor": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-11-02T21:19:52.000000"
+ }
+ },
+ "show_in_tree": True,
+ "type": "host",
+ "zone": "internal"
+}
\ No newline at end of file
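+# Sample Neutron 'subnet.create.end' notification used by the subnet add test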
diff --git a/app/test/event_based_scan/test_data/event_payload_subnet_add.py b/app/test/event_based_scan/test_data/event_payload_subnet_add.py
new file mode 100644
index 0000000..7167f4c
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_subnet_add.py
@@ -0,0 +1,124 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import datetime
+
+from test.event_based_scan.config.test_config import ENV_CONFIG
+
+NETWORK_DOC = {'port_security_enabled': True, 'status': 'ACTIVE', 'subnet_ids': [], 'parent_type': 'networks_folder',
+ 'parent_id': '75c0eb79ff4a42b0ae4973c8375ddf40-networks', 'parent_text': 'Networks', 'subnets': {},
+ 'admin_state_up': True, 'show_in_tree': True, 'project': 'calipso-project',
+ 'name_path': '/' + ENV_CONFIG + '/Projects/calipso-project/Networks/testsubnetadd',
+ 'router:external': False,
+ 'provider:physical_network': None,
+ 'id_path': '/' + ENV_CONFIG + '/' + ENV_CONFIG + '-projects/75c0eb79ff4a42b0ae4973c8375ddf40/75c0eb79ff4a42b0' +
+ 'ae4973c8375ddf40-networks/1bb0ba6c-6863-4121-ac89-93f81a9da2b0',
+ 'object_name': 'testsubnetadd', 'provider:segmentation_id': 46, 'provider:network_type': 'vxlan',
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', 'environment': ENV_CONFIG, 'name': 'testsubnetadd',
+ 'last_scanned': '2016-10-13 00:20:59.280329', 'id': '1bb0ba6c-6863-4121-ac89-93f81a9da2b0', 'cidrs': [],
+ 'type': 'network', 'network': '1bb0ba6c-6863-4121-ac89-93f81a9da2b0', 'shared': False, 'mtu': 1400}
+
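+# Sample Neutron 'subnet.create.end' notification used by the subnet add test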
+EVENT_PAYLOAD_SUBNET_ADD = {
+ 'payload': {
+ 'subnet': {'dns_nameservers': [], 'ipv6_address_mode': None, 'ipv6_ra_mode': None, 'gateway_ip': '172.16.10.1',
+ 'allocation_pools': [{'start': '172.16.10.2', 'end': '172.16.10.126'}], 'enable_dhcp': True,
+ 'id': 'e950055d-231c-4380-983c-a258ea958d58', 'network_id': '1bb0ba6c-6863-4121-ac89-93f81a9da2b0',
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', 'ip_version': 4, 'cidr': '172.16.10.0/25',
+ 'subnetpool_id': None, 'name': 'testsubnetadd', 'host_routes': []}}, '_context_domain': None,
+ 'timestamp': '2016-10-13 00:20:59.776358', '_context_project_domain': None, '_context_user_domain': None,
+ '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40', 'publisher_id': 'network.node-6.cisco.com',
+ '_context_user': '13baa553aae44adca6615e711fd2f6d9', '_context_user_id': '13baa553aae44adca6615e711fd2f6d9',
+ 'event_type': 'subnet.create.end', 'message_id': '90581321-e9c9-4112-8fe6-38ebf57d5b6b',
+ '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_tenant_name': 'calipso-project',
+ '_context_project_name': 'calipso-project', '_context_user_name': 'admin', '_context_resource_uuid': None,
+ '_unique_id': 'e8b328229a724938a6bc63f9db737f49', '_context_request_id': 'req-20cfc138-4e1a-472d-b996-7f27ac58446d',
+ 'priority': 'INFO', '_context_roles': ['_member_', 'admin'],
+ '_context_auth_token': 'gAAAAABX_tLMEzC9KhdcD20novcuvgwmpQkwV9hOk86d4AZlsQwXSRbCwBZgUPQZco4VsuCg59_gFeM_scBVmI' +
+ 'dDysNUrAhZctDzXneM0cb5nBtjJTfJPpI2_kKgAuGDBARrHZpNs-vPg-SjMtu87w2rgTKfda6idTMKWG3ipe' +
+ '-jXrgNN7p-2kkJzGhZXbMaaeBs3XU-X_ew',
+ '_context_read_only': False,
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_is_admin': True, '_context_show_deleted': False,
+ '_context_timestamp': '2016-10-13 00:20:59.307917'}
+
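+# RegionOne region document with its Keystone service-catalog endpoints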
+EVENT_PAYLOAD_REGION = {
+ 'RegionOne': {
+ 'object_name': 'RegionOne', 'id': 'RegionOne', 'name': 'RegionOne',
+ 'environment': ENV_CONFIG,
+ 'last_scanned': datetime.datetime.utcnow(),
+ 'name_path': '/' + ENV_CONFIG + '/Regions/RegionOne',
+ 'parent_id': ENV_CONFIG + '-regions', 'parent_type': 'regions_folder',
+ 'endpoints': {'nova': {'id': '274cbbd9fd6d4311b78e78dd3a1df51f',
+ 'adminURL': 'http://192.168.0.2:8774/v2/8c1751e0ce714736a63fee3c776164da',
+ 'service_type': 'compute',
+ 'publicURL': 'http://172.16.0.3:8774/v2/8c1751e0ce714736a63fee3c776164da',
+ 'internalURL': 'http://192.168.0.2:8774/v2/8c1751e0ce714736a63fee3c776164da'},
+ 'heat-cfn': {'id': '0f04ec6ed49f4940822161bf677bdfb2',
+ 'adminURL': 'http://192.168.0.2:8000/v1',
+ 'service_type': 'cloudformation',
+ 'publicURL': 'http://172.16.0.3:8000/v1',
+ 'internalURL': 'http://192.168.0.2:8000/v1'},
+ 'nova_ec2': {'id': '390dddc753cc4d378b489129d06c4b7d',
+ 'adminURL': 'http://192.168.0.2:8773/services/Admin',
+ 'service_type': 'ec2',
+ 'publicURL': 'http://172.16.0.3:8773/services/Cloud',
+ 'internalURL': 'http://192.168.0.2:8773/services/Cloud'},
+ 'glance': {'id': '475c6c77a94e4e63a5a0f0e767f697a8',
+ 'adminURL': 'http://192.168.0.2:9292',
+ 'service_type': 'image',
+ 'publicURL': 'http://172.16.0.3:9292',
+ 'internalURL': 'http://192.168.0.2:9292'},
+ 'swift': {'id': '12e78e06595f48339baebdb5d4309c70',
+ 'adminURL': 'http://192.168.0.2:8080/v1/AUTH_8c1751e0ce714736a63fee3c776164da',
+ 'service_type': 'object-store',
+ 'publicURL': 'http://172.16.0.3:8080/v1/AUTH_8c1751e0ce714736a63fee3c776164da',
+ 'internalURL': 'http://192.168.0.2:8080/v1/AUTH_8c1751e0ce714736a63fee3c776164da'},
+ 'swift_s3': {'id': '4f655c8f2bef46a0a7ba4a20bba53666',
+ 'adminURL': 'http://192.168.0.2:8080',
+ 'service_type': 's3',
+ 'publicURL': 'http://172.16.0.3:8080',
+ 'internalURL': 'http://192.168.0.2:8080'},
+ 'keystone': {'id': '404cceb349614eb39857742970408301',
+ 'adminURL': 'http://192.168.0.2:35357/v2.0',
+ 'service_type': 'identity',
+ 'publicURL': 'http://172.16.0.3:5000/v2.0',
+ 'internalURL': 'http://192.168.0.2:5000/v2.0'},
+ 'cinderv2': {'id': '2c30937688e944889db4a64fab6816e6',
+ 'adminURL': 'http://192.168.0.2:8776/v2/8c1751e0ce714736a63fee3c776164da',
+ 'service_type': 'volumev2',
+ 'publicURL': 'http://172.16.0.3:8776/v2/8c1751e0ce714736a63fee3c776164da',
+ 'internalURL': 'http://192.168.0.2:8776/v2/8c1751e0ce714736a63fee3c776164da'},
+ 'novav3': {'id': '1df917160dfb4ce5b469764fde22b3ab',
+ 'adminURL': 'http://192.168.0.2:8774/v3',
+ 'service_type': 'computev3',
+ 'publicURL': 'http://172.16.0.3:8774/v3',
+ 'internalURL': 'http://192.168.0.2:8774/v3'},
+ 'ceilometer': {'id': '617177a3dcb64560a5a79ab0a91a7225',
+ 'adminURL': 'http://192.168.0.2:8777',
+ 'service_type': 'metering',
+ 'publicURL': 'http://172.16.0.3:8777',
+ 'internalURL': 'http://192.168.0.2:8777'},
+ 'neutron': {'id': '8dc28584da224c4b9671171ead3c982a',
+ 'adminURL': 'http://192.168.0.2:9696',
+ 'service_type': 'network',
+ 'publicURL': 'http://172.16.0.3:9696',
+ 'internalURL': 'http://192.168.0.2:9696'},
+ 'cinder': {'id': '05643f2cf9094265b432376571851841',
+ 'adminURL': 'http://192.168.0.2:8776/v1/8c1751e0ce714736a63fee3c776164da',
+ 'service_type': 'volume',
+ 'publicURL': 'http://172.16.0.3:8776/v1/8c1751e0ce714736a63fee3c776164da',
+ 'internalURL': 'http://192.168.0.2:8776/v1/8c1751e0ce714736a63fee3c776164da'},
+ 'heat': {'id': '9e60268a5aaf422d9e42f0caab0a19b4',
+ 'adminURL': 'http://192.168.0.2:8004/v1/8c1751e0ce714736a63fee3c776164da',
+ 'service_type': 'orchestration',
+ 'publicURL': 'http://172.16.0.3:8004/v1/8c1751e0ce714736a63fee3c776164da',
+ 'internalURL': 'http://192.168.0.2:8004/v1/8c1751e0ce714736a63fee3c776164da'}},
+ 'show_in_tree': True,
+ 'id_path': '/' + ENV_CONFIG + '/' + ENV_CONFIG + '-regions/RegionOne',
+ 'type': 'region'}}
diff --git a/app/test/event_based_scan/test_data/event_payload_subnet_delete.py b/app/test/event_based_scan/test_data/event_payload_subnet_delete.py
new file mode 100644
index 0000000..55a785d
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_subnet_delete.py
@@ -0,0 +1,95 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.event_based_scan.config.test_config import ENV_CONFIG
+
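+# Sample Neutron 'subnet.delete.end' notification used by the subnet delete test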
+EVENT_PAYLOAD_SUBNET_DELETE = {
+ 'payload': {'subnet_id': '88442b4a-e62d-4d72-9d18-b8d6973eb3da'},
+ '_context_auth_token': 'gAAAAABYGRxBKUKuNjrN4Z9H5HNhfpfS9h671aqjRNwPT_2snUk5OI52zTpAh-9yjIlcJOZRXHUlWZW7R'+
+ '-vNAjUwdSJ4ILwMW9smDT8hLTsBIki-QtJl1nSSlfhVAqhMsnrQxREJeagESGuvsR3BxHgMVrCt1Vh5wR9'+
+ 'E1_pHgn0WFpwVJEN0U8IxNfBvU8uLuIHq1j6XRiiY',
+ '_context_user_domain': None, '_context_user_name': 'admin', '_context_read_only': False,
+ 'publisher_id': 'network.node-6.cisco.com', 'event_type': 'subnet.delete.end',
+ 'timestamp': '2016-11-01 22:58:04.504790', 'priority': 'INFO',
+ '_context_roles': ['_member_', 'admin'],
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ '_unique_id': 'f79384f4c7764bdc93ee2469d79123d1',
+ '_context_tenant_name': 'calipso-project',
+ '_context_request_id': 'req-cbb08126-3027-49f0-a896-aedf05cc3389',
+ '_context_domain': None, '_context_is_admin': True,
+ '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_project_domain': None,
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9',
+ '_context_project_name': 'calipso-project',
+ '_context_user': '13baa553aae44adca6615e711fd2f6d9', '_context_resource_uuid': None,
+ '_context_timestamp': '2016-11-01 22:58:02.675098', '_context_show_deleted': False,
+ 'message_id': '7bd8402e-8f1f-4f8c-afc2-5042b3388ae7',
+ '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40'}
+
+
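+# Network document containing the subnet referenced by the delete notification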
+EVENT_PAYLOAD_NETWORK = {
+ "admin_state_up": True,
+ "cidrs": [
+ "172.16.10.0/25"
+ ],
+ "environment": ENV_CONFIG,
+ "id": "121c727b-6376-4a86-a5a8-793dfe7a8ef4",
+ "id_path": "/"+ENV_CONFIG+"/"+ENV_CONFIG+"-projects/75c0eb79ff4a42b0ae4973c8375ddf40/75c0eb79ff4a" +
+ "42b0ae4973c8375ddf40-networks/121c727b-6376-4a86-a5a8-793dfe7a8ef4",
+ "last_scanned": 0,
+ "mtu": 1400,
+ "name": "asad",
+ "name_path": "/"+ENV_CONFIG+"/Projects/calipso-project/Networks/asad",
+ "network": "121c727b-6376-4a86-a5a8-793dfe7a8ef4",
+ "object_name": "asad",
+ "parent_id": "75c0eb79ff4a42b0ae4973c8375ddf40-networks",
+ "parent_text": "Networks",
+ "parent_type": "networks_folder",
+ "port_security_enabled": True,
+ "project": "calipso-project",
+ "provider:network_type": "vxlan",
+ "provider:physical_network": None,
+ "provider:segmentation_id": 18,
+ "router:external": False,
+ "shared": False,
+ "show_in_tree": True,
+ "status": "ACTIVE",
+ "subnets": {
+ "testsubnet": {
+ "subnetpool_id": None,
+ "enable_dhcp": True,
+ "ipv6_ra_mode": None,
+ "dns_nameservers": [
+
+ ],
+ "name": "testsubnet",
+ "ipv6_address_mode": None,
+ "ip_version": 4,
+ "gateway_ip": "172.16.10.1",
+ "network_id": "121c727b-6376-4a86-a5a8-793dfe7a8ef4",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "allocation_pools": [
+ {
+ "start": "172.16.10.2",
+ "end": "172.16.10.126"
+ }
+ ],
+ "id": "88442b4a-e62d-4d72-9d18-b8d6973eb3da",
+ "host_routes": [
+
+ ],
+ "cidr": "172.16.10.0/25"
+ }
+ },
+ "subnet_ids": [
+ "88442b4a-e62d-4d72-9d18-b8d6973eb3da"
+ ],
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "network"
+}
diff --git a/app/test/event_based_scan/test_data/event_payload_subnet_update.py b/app/test/event_based_scan/test_data/event_payload_subnet_update.py
new file mode 100644
index 0000000..5f547c5
--- /dev/null
+++ b/app/test/event_based_scan/test_data/event_payload_subnet_update.py
@@ -0,0 +1,76 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from test.event_based_scan.config.test_config import ENV_CONFIG
+
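+# Network document containing the subnet that the update notifications below modify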
+NETWORK_DOC = {
+ 'port_security_enabled': True, 'status': 'ACTIVE',
+ 'subnets_id': ['393a1f80-4277-4c9a-b44c-0bc05a5121c6'], 'parent_type': 'networks_folder',
+ 'parent_id': '75c0eb79ff4a42b0ae4973c8375ddf40-networks', 'parent_text': 'Networks',
+ 'subnets': {'test': {'name': 'test', 'subnetpool_id': None, 'id': '393a1f80-4277-4c9a-b44c-0bc05a5121c6',
+ 'network_id': '0abe6331-0d74-4bbd-ad89-a5719c3793e4', 'gateway_ip': '172.16.12.1',
+ 'ipv6_address_mode': None, 'dns_nameservers': [], 'ipv6_ra_mode': None, 'cidr': '172.16.12.0/24',
+ 'allocation_pools': [{'start': '172.16.12.2', 'end': '172.16.12.254'}], 'enable_dhcp': True,
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', 'host_routes': [], 'ip_version': 4},
+ },
+ 'admin_state_up': True, 'show_in_tree': True, 'project': 'calipso-project',
+ 'name_path': '/'+ENV_CONFIG+'/Projects/calipso-project/Networks/testsubnetadd', 'router:external': False,
+ 'provider:physical_network': None,
+ 'id_path': '/'+ENV_CONFIG+'/'+ENV_CONFIG+'-projects/75c0eb79ff4a42b0ae4973c8375ddf40/75c0eb79ff4a42b0'+
+ 'ae4973c8375ddf40-networks/0abe6331-0d74-4bbd-ad89-a5719c3793e4',
+ 'object_name': 'testsubnetadd', 'provider:segmentation_id': 46, 'provider:network_type': 'vxlan',
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', 'environment': ENV_CONFIG, 'name': 'testsubnetadd',
+ 'last_scanned': '2016-10-13 00:20:59.280329', 'id': '0abe6331-0d74-4bbd-ad89-a5719c3793e4',
+ 'cidrs': ['172.16.12.0/24'],
+ 'type': 'network', 'network': '0abe6331-0d74-4bbd-ad89-a5719c3793e4', 'shared': False, 'mtu': 1400}
+
+
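+# Two 'subnet.update.end' notifications: the first renames the subnet, the second also clears its gateway_ip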
+EVENT_PAYLOAD_SUBNET_UPDATE = {
+ 'publisher_id': 'network.node-6.cisco.com', '_context_show_deleted': False, '_context_project_domain': None,
+ '_context_resource_uuid': None, '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'message_id': '548650b4-2cba-45b6-9b3b-b87cb5c3246e',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_unique_id': '9ffd93fe355141d9976c6808a9ce9b7d',
+ '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_read_only': False,
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9', '_context_timestamp': '2016-10-25 00:00:18.505443',
+ 'priority': 'INFO', '_context_roles': ['_member_', 'admin'], '_context_project_name': 'calipso-project',
+ '_context_user': '13baa553aae44adca6615e711fd2f6d9', '_context_user_name': 'admin',
+ 'timestamp': '2016-10-25 00:00:19.354342', '_context_request_id': 'req-62945d8f-a233-44c8-aa53-f608ad92fd56',
+ '_context_tenant_name': 'calipso-project', '_context_domain': None, 'payload': {
+ 'subnet': {'name': 'port', 'subnetpool_id': None, 'id': '393a1f80-4277-4c9a-b44c-0bc05a5121c6',
+ 'network_id': '0abe6331-0d74-4bbd-ad89-a5719c3793e4', 'gateway_ip': '172.16.12.1',
+ 'ipv6_address_mode': None, 'dns_nameservers': [], 'ipv6_ra_mode': None, 'cidr': '172.16.12.0/24',
+ 'allocation_pools': [{'start': '172.16.12.2', 'end': '172.16.12.254'}], 'enable_dhcp': True,
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', 'host_routes': [], 'ip_version': 4}},
+ '_context_is_admin': True, '_context_user_domain': None, 'event_type': 'subnet.update.end',
+ '_context_auth_token': 'gAAAAABYDp0ZacwkUNIRvtiS-3qjLQFZKbkOtTmvuoKX9yM8yCIvl-eZmMC_SPjwPAMJcd8qckE77lLpQ' +
+ 'Sx0lWB67mT5jQA-tmp8bcz26kXXr8KlGCicxxjkYTYkJQhC9w8BbGc36CpbRBzIKlOrPtPXUYZrUmPgInQ' +
+ 'qCNA-eDeMyJ-AiA1zmNSZK3R43YIJtnDYieLQvX2P'}
+
+EVENT_PAYLOAD_SUBNET_UPDATE_1 = {
+ 'publisher_id': 'network.node-6.cisco.com', '_context_show_deleted': False, '_context_project_domain': None,
+ '_context_resource_uuid': None, '_context_tenant': '75c0eb79ff4a42b0ae4973c8375ddf40',
+ 'message_id': 'd0f7545f-a2d6-4b0e-a658-01e4de4ecd19',
+ '_context_user_identity': '13baa553aae44adca6615e711fd2f6d9 75c0eb79ff4a42b0ae4973c8375ddf40 - - -',
+ '_context_tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_unique_id': '1ca167b1317d4523a31b2ae99b25d67c',
+ '_context_project_id': '75c0eb79ff4a42b0ae4973c8375ddf40', '_context_read_only': False,
+ '_context_user_id': '13baa553aae44adca6615e711fd2f6d9', '_context_timestamp': '2016-10-25 00:03:21.079403',
+ 'priority': 'INFO', '_context_roles': ['_member_', 'admin'], '_context_project_name': 'calipso-project',
+ '_context_user': '13baa553aae44adca6615e711fd2f6d9', '_context_user_name': 'admin',
+ 'timestamp': '2016-10-25 00:03:22.689115', '_context_request_id': 'req-7a19e8d7-51f6-470e-9035-5e007c9b1f89',
+ '_context_tenant_name': 'calipso-project', '_context_domain': None, 'payload': {
+ 'subnet': {'name': 'port', 'subnetpool_id': None, 'id': '393a1f80-4277-4c9a-b44c-0bc05a5121c6',
+ 'network_id': '0abe6331-0d74-4bbd-ad89-a5719c3793e4', 'gateway_ip': None, 'ipv6_address_mode': None,
+ 'dns_nameservers': [], 'ipv6_ra_mode': None, 'cidr': '172.16.12.0/24',
+ 'allocation_pools': [{'start': '172.16.12.2', 'end': '172.16.12.254'}], 'enable_dhcp': True,
+ 'tenant_id': '75c0eb79ff4a42b0ae4973c8375ddf40', 'host_routes': [], 'ip_version': 4}},
+ '_context_is_admin': True, '_context_user_domain': None, 'event_type': 'subnet.update.end',
+ '_context_auth_token': 'gAAAAABYDp0ZacwkUNIRvtiS-3qjLQFZKbkOtTmvuoKX9yM8yCIvl-eZmMC_SPjwPAMJcd8qckE77lLpQSx0l'+
+ 'WB67mT5jQA-tmp8bcz26kXXr8KlGCicxxjkYTYkJQhC9w8BbGc36CpbRBzIKlOrPtPXUYZrUmPgInQqCNA-eD'+
+ 'eMyJ-AiA1zmNSZK3R43YIJtnDYieLQvX2P'}
diff --git a/app/test/event_based_scan/test_event.py b/app/test/event_based_scan/test_event.py
new file mode 100644
index 0000000..e3e8ab9
--- /dev/null
+++ b/app/test/event_based_scan/test_event.py
@@ -0,0 +1,55 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+import unittest
+
+from discover.configuration import Configuration
+from test.event_based_scan.config.test_config \
+ import MONGODB_CONFIG, ENV_CONFIG, COLLECTION_CONFIG
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.console_logger import ConsoleLogger
+from utils.mongo_access import MongoAccess
+
+
+class TestEvent(unittest.TestCase):
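+ """Base class for the event-based scan tests: connects to MongoDB,
+ selects the test environment, and cleans up any items a test created."""
+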
+ def setUp(self):
+ self.log = ConsoleLogger()
+ self.mongo_config = MONGODB_CONFIG
+ self.env = ENV_CONFIG
+ self.collection = COLLECTION_CONFIG
+
+ MongoAccess.set_config_file(self.mongo_config)
+ self.conf = Configuration()
+ self.conf.use_env(self.env)
+
+ self.inv = InventoryMgr()
+ self.inv.set_collections(self.collection)
+ self.item_ids = []
+
+ def set_item(self, document):
+ self.inv.set(document)
+ self.item_ids.append(document['id'])
+
+ def assert_empty_by_id(self, object_id):
+ doc = self.inv.get_by_id(self.env, object_id)
+ self.assertIsNone(doc)
+
+ def tearDown(self):
+ for item_id in self.item_ids:
+ item = self.inv.get_by_id(self.env, item_id)
+ # delete children
+ if item:
+ regexp = re.compile('^{}/'.format(item['id_path']))
+ self.inv.delete('inventory', {'id_path': {'$regex': regexp}})
+
+ # delete target item
+ self.inv.delete('inventory', {'id': item_id})
+ item = self.inv.get_by_id(self.env, item_id)
+ self.assertIsNone(item)
diff --git a/app/test/event_based_scan/test_event_delete_base.py b/app/test/event_based_scan/test_event_delete_base.py
new file mode 100644
index 0000000..1ccabb3
--- /dev/null
+++ b/app/test/event_based_scan/test_event_delete_base.py
@@ -0,0 +1,64 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from bson import ObjectId
+
+from discover.clique_finder import CliqueFinder
+from discover.events.event_base import EventBase
+from test.event_based_scan.test_event import TestEvent
+
+
+class TestEventDeleteBase(TestEvent):
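+ """Common flow for *.delete.end tests: seed the document to delete,
+ run the handler, then verify the item, its links and its children are gone."""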
+
+ def setUp(self):
+ super().setUp()
+ self.values = {}
+
+ def set_item_for_deletion(self, object_type, document):
+
+ payload = self.values['payload']
+ self.item_id = payload['{}_id'.format(object_type)]
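+ # router documents are stored per host as <host>-qrouter-<uuid>,
+ # so rebuild that inventory id from the publisher_id host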
+ if object_type == 'router':
+ host_id = self.values['publisher_id'].replace("network.", "", 1)
+ self.item_id = "-".join([host_id, "qrouter", self.item_id])
+
+ self.assertEqual(document['id'], self.item_id, msg="Document id and payload id are different")
+
+ item = self.inv.get_by_id(self.env, self.item_id)
+ if not item:
+ self.log.info('{} document not found, adding document for the delete test.'.format(object_type))
+
+ # add the document so the delete handler has something to remove
+ self.set_item(document)
+ item = self.inv.get_by_id(self.env, self.item_id)
+ self.assertIsNotNone(item)
+
+ def handle_delete(self, handler: EventBase):
+
+ item = self.inv.get_by_id(self.env, self.item_id)
+ db_id = ObjectId(item['_id'])
+ clique_finder = CliqueFinder()
+
+ # delete item
+ event_result = handler.handle(self.env, self.values)
+ self.assertTrue(event_result.result)
+
+ # check that the item was deleted
+ item = self.inv.get_by_id(self.env, self.item_id)
+ self.assertIsNone(item)
+
+ # check links
+ matched_links_source = clique_finder.find_links_by_source(db_id)
+ matched_links_target = clique_finder.find_links_by_target(db_id)
+ self.assertEqual(matched_links_source.count(), 0)
+ self.assertEqual(matched_links_target.count(), 0)
+
+ # check children
+ matched_children = self.inv.get_children(self.env, None, self.item_id)
+ self.assertEqual(len(matched_children), 0)
diff --git a/app/test/event_based_scan/test_instance_add.py b/app/test/event_based_scan/test_instance_add.py
new file mode 100644
index 0000000..24c29b2
--- /dev/null
+++ b/app/test/event_based_scan/test_instance_add.py
@@ -0,0 +1,61 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from unittest.mock import patch
+
+from discover.events.event_instance_add import EventInstanceAdd
+from test.event_based_scan.test_data.event_payload_instance_add \
+ import EVENT_PAYLOAD_INSTANCE_ADD, INSTANCES_ROOT, HOST, INSTANCE_DOCUMENT
+from test.event_based_scan.test_event import TestEvent
+
+
+class TestInstanceAdd(TestEvent):
+
+ def insert_instance(self):
+ self.set_item(INSTANCE_DOCUMENT)
+
+ # Patch ScanHost entirely to negate its side effects and supply our own
+ @patch("discover.events.event_instance_add.ScanHost")
+ def test_handle_instance_add(self, scan_host_mock):
+ self.values = EVENT_PAYLOAD_INSTANCE_ADD
+ payload = self.values['payload']
+ self.instance_id = payload['instance_id']
+ host_id = payload['host']
+
+ # prepare instances root, in case it's not there
+ self.set_item(INSTANCES_ROOT)
+
+ # prepare host document, in case it does not exist yet
+ self.set_item(HOST)
+
+ # check instance document
+ instance = self.inv.get_by_id(self.env, self.instance_id)
+ if instance:
+ self.log.info('instance document already exists, deleting it first.')
+ self.inv.delete('inventory', {'id': self.instance_id})
+
+ instance = self.inv.get_by_id(self.env, self.instance_id)
+ self.assertIsNone(instance)
+
+ # simulate instance insertion after host scan
+ scan_host_mock.return_value.scan_links.side_effect = self.insert_instance
+
+ # run the instance add handler and check its return value
+ handler = EventInstanceAdd()
+ ret = handler.handle(self.env, self.values)
+
+ self.assertEqual(ret.result, True)
+
+ # check host document
+ host = self.inv.get_by_id(self.env, host_id)
+ self.assertIsNotNone(host)
+
+ # check instance document
+ instance_document = self.inv.get_by_id(self.env, self.instance_id)
+ self.assertIsNotNone(instance_document)
diff --git a/app/test/event_based_scan/test_instance_delete.py b/app/test/event_based_scan/test_instance_delete.py
new file mode 100644
index 0000000..1572e9d
--- /dev/null
+++ b/app/test/event_based_scan/test_instance_delete.py
@@ -0,0 +1,24 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_instance_delete import EventInstanceDelete
+from test.event_based_scan.test_data.event_payload_instance_delete import EVENT_PAYLOAD_INSTANCE_DELETE, \
+ INSTANCE_DOCUMENT
+from test.event_based_scan.test_event_delete_base import TestEventDeleteBase
+
+
+class TestInstanceDelete(TestEventDeleteBase):
+
+ def setUp(self):
+ super().setUp()
+ self.values = EVENT_PAYLOAD_INSTANCE_DELETE
+ self.set_item_for_deletion(object_type="instance", document=INSTANCE_DOCUMENT)
+
+ def test_handle_instance_delete(self):
+ self.handle_delete(handler=EventInstanceDelete())
diff --git a/app/test/event_based_scan/test_instance_update.py b/app/test/event_based_scan/test_instance_update.py
new file mode 100644
index 0000000..6abccb5
--- /dev/null
+++ b/app/test/event_based_scan/test_instance_update.py
@@ -0,0 +1,46 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_instance_update import EventInstanceUpdate
+from test.event_based_scan.test_data.event_payload_instance_update import EVENT_PAYLOAD_INSTANCE_UPDATE, INSTANCE_DOCUMENT
+from test.event_based_scan.test_event import TestEvent
+
+
+class TestInstanceUpdate(TestEvent):
+
+ def test_handle_normal_situation(self):
+ self.values = EVENT_PAYLOAD_INSTANCE_UPDATE
+ payload = self.values['payload']
+ self.instance_id = payload['instance_id']
+ self.item_ids.append(self.instance_id)
+ new_name = payload['display_name']
+
+ # preparing instance to be updated
+ instance = self.inv.get_by_id(self.env, self.instance_id)
+ if not instance:
+ self.log.info("instance document is not found, add document for updating")
+
+ # add instance document for updating
+ self.set_item(INSTANCE_DOCUMENT)
+ instance = self.inv.get_by_id(self.env, self.instance_id)
+ self.assertIsNotNone(instance)
+ self.assertEqual(instance['name'], INSTANCE_DOCUMENT['name'])
+
+ name_path = instance['name_path']
+ new_name_path = name_path[:name_path.rindex('/') + 1] + new_name
+
+ # update instance document
+ EventInstanceUpdate().handle(self.env, self.values)
+
+ # get new document
+ instance = self.inv.get_by_id(self.env, self.instance_id)
+
+ # check update result.
+ self.assertEqual(instance['name'], new_name)
+ self.assertEqual(instance['name_path'], new_name_path)
diff --git a/app/test/event_based_scan/test_interface_add.py b/app/test/event_based_scan/test_interface_add.py
new file mode 100644
index 0000000..a9eaac8
--- /dev/null
+++ b/app/test/event_based_scan/test_interface_add.py
@@ -0,0 +1,74 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from unittest.mock import MagicMock
+
+from discover.events.event_interface_add import EventInterfaceAdd
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.api.api_fetch_port import ApiFetchPort
+from discover.fetchers.cli.cli_fetch_host_vservice import CliFetchHostVservice
+from discover.fetchers.cli.cli_fetch_vservice_vnics import CliFetchVserviceVnics
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from test.event_based_scan.test_data.event_payload_interface_add import EVENT_PAYLOAD_INTERFACE_ADD, NETWORK_DOC, \
+ EVENT_PAYLOAD_REGION, PORT_DOC, ROUTER_DOCUMENT, HOST, VNIC_DOCS
+from test.event_based_scan.test_event import TestEvent
+from utils.util import encode_router_id
+
+
+class TestInterfaceAdd(TestEvent):
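+ """Router interface add test: API and CLI fetchers are mocked so the
+ handler can run without a live OpenStack environment."""
+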
+ def test_handle_interface_add(self):
+ self.values = EVENT_PAYLOAD_INTERFACE_ADD
+ self.payload = self.values['payload']
+ self.interface = self.payload['router_interface']
+
+ self.port_id = self.interface['port_id']
+ self.host_id = self.values["publisher_id"].replace("network.", "", 1)
+ self.router_id = encode_router_id(self.host_id, self.interface['id'])
+
+ self.set_item(NETWORK_DOC)
+ ApiAccess.regions = EVENT_PAYLOAD_REGION
+
+ # mock port data returned by the API port fetcher
+ original_api_get_port = ApiFetchPort.get
+ ApiFetchPort.get = MagicMock(return_value=[PORT_DOC])
+ self.item_ids.append(PORT_DOC['id'])
+
+ # set router document
+ self.set_item(ROUTER_DOCUMENT)
+
+ # set host document
+ self.set_item(HOST)
+
+ # mock add_links
+ original_add_links = FindLinksForVserviceVnics.add_links
+ FindLinksForVserviceVnics.add_links = MagicMock()
+
+ # mock get_vservice
+ original_get_vservice = CliFetchHostVservice.get_vservice
+ CliFetchHostVservice.get_vservice = MagicMock(return_value=ROUTER_DOCUMENT)
+
+ # mock handle_service
+ original_handle_service = CliFetchVserviceVnics.handle_service
+ CliFetchVserviceVnics.handle_service = MagicMock(return_value=VNIC_DOCS)
+
+ # handle the notification
+ EventInterfaceAdd().handle(self.env, self.values)
+
+ # restore the original methods
+ ApiFetchPort.get = original_api_get_port
+ FindLinksForVserviceVnics.add_links = original_add_links
+ CliFetchHostVservice.get_vservice = original_get_vservice
+ CliFetchVserviceVnics.handle_service = original_handle_service
+
+ # check port and router document
+ port_doc = self.inv.get_by_id(self.env, self.port_id)
+ self.assertIsNotNone(port_doc)
+
+ router_doc = self.inv.get_by_id(self.env, self.router_id)
+ self.assertIn(NETWORK_DOC['id'], router_doc['network'])
diff --git a/app/test/event_based_scan/test_interface_delete.py b/app/test/event_based_scan/test_interface_delete.py
new file mode 100644
index 0000000..b156758
--- /dev/null
+++ b/app/test/event_based_scan/test_interface_delete.py
@@ -0,0 +1,44 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_interface_delete import EventInterfaceDelete
+from discover.fetchers.api.api_access import ApiAccess
+from test.event_based_scan.test_data.event_payload_interface_delete import EVENT_PAYLOAD_INTERFACE_DELETE, NETWORK_DOC, \
+ EVENT_PAYLOAD_REGION, PORT_DOC, ROUTER_DOCUMENT, HOST, VNIC_DOCS
+from test.event_based_scan.test_event import TestEvent
+from utils.util import encode_router_id
+
+
+class TestInterfaceDelete(TestEvent):
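+ """Router interface delete test: seeds network, port, router, host and
+ vnic documents, then verifies the handler removes the port and vnic."""
+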
+ def test_handle_interface_delete(self):
+ self.values = EVENT_PAYLOAD_INTERFACE_DELETE
+ self.payload = self.values['payload']
+ self.interface = self.payload['router_interface']
+
+ self.port_id = self.interface['port_id']
+ self.host_id = self.values["publisher_id"].replace("network.", "", 1)
+ self.router_id = encode_router_id(self.host_id, self.interface['id'])
+
+ # seed documents needed for handling the interface delete
+ self.set_item(NETWORK_DOC)
+ self.set_item(PORT_DOC)
+ self.set_item(ROUTER_DOCUMENT)
+ self.set_item(HOST)
+ self.set_item(VNIC_DOCS[0])
+ ApiAccess.regions = EVENT_PAYLOAD_REGION
+
+ # delete interface
+ EventInterfaceDelete().handle(self.env, self.values)
+
+ # assert data
+ router_doc = self.inv.get_by_id(self.env, ROUTER_DOCUMENT['id'])
+ self.assertNotIn(NETWORK_DOC['id'], router_doc['network'])
+
+ self.assert_empty_by_id(PORT_DOC['id'])
+ self.assert_empty_by_id(VNIC_DOCS[0]['id'])
diff --git a/app/test/event_based_scan/test_network_add.py b/app/test/event_based_scan/test_network_add.py
new file mode 100644
index 0000000..08be9e1
--- /dev/null
+++ b/app/test/event_based_scan/test_network_add.py
@@ -0,0 +1,47 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_network_add import EventNetworkAdd
+from test.event_based_scan.test_data.event_payload_network_add import EVENT_PAYLOAD_NETWORK_ADD
+from test.event_based_scan.test_event import TestEvent
+
+
+class TestNetworkAdd(TestEvent):
+
+ def test_handle_network_add(self):
+ self.values = EVENT_PAYLOAD_NETWORK_ADD
+ self.payload = self.values['payload']
+ self.network = self.payload['network']
+ self.network_id = self.network['id']
+ self.item_ids.append(self.network_id)
+
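+ # make sure no network document with this id exists before the event is handled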
+ network_document = self.inv.get_by_id(self.env, self.network_id)
+ if network_document:
+ self.log.info('network document already exists, deleting it first.')
+ self.inv.delete('inventory', {'id': self.network_id})
+
+ network_document = self.inv.get_by_id(self.env, self.network_id)
+ self.assertIsNone(network_document)
+
+ # build network document for adding network
+ project_name = self.values['_context_project_name']
+ project_id = self.values['_context_project_id']
+ parent_id = project_id + '-networks'
+ network_name = self.network['name']
+
+ # add network document
+ EventNetworkAdd().handle(self.env, self.values)
+
+ # check network document
+ network_document = self.inv.get_by_id(self.env, self.network_id)
+ self.assertIsNotNone(network_document)
+ self.assertEqual(network_document["project"], project_name)
+ self.assertEqual(network_document["parent_id"], parent_id)
+ self.assertEqual(network_document["name"], network_name)
+
diff --git a/app/test/event_based_scan/test_network_delete.py b/app/test/event_based_scan/test_network_delete.py
new file mode 100644
index 0000000..3e08af1
--- /dev/null
+++ b/app/test/event_based_scan/test_network_delete.py
@@ -0,0 +1,24 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_network_delete import EventNetworkDelete
+from test.event_based_scan.test_data.event_payload_network_delete import EVENT_PAYLOAD_NETWORK_DELETE, \
+ EVENT_PAYLOAD_NETWORK
+from test.event_based_scan.test_event_delete_base import TestEventDeleteBase
+
+
+class TestNetworkDelete(TestEventDeleteBase):
+
+ def setUp(self):
+ super().setUp()
+ self.values = EVENT_PAYLOAD_NETWORK_DELETE
+ self.set_item_for_deletion(object_type="network", document=EVENT_PAYLOAD_NETWORK)
+
+ def test_handle_network_delete(self):
+ self.handle_delete(handler=EventNetworkDelete())
diff --git a/app/test/event_based_scan/test_network_update.py b/app/test/event_based_scan/test_network_update.py
new file mode 100644
index 0000000..bf9eee4
--- /dev/null
+++ b/app/test/event_based_scan/test_network_update.py
@@ -0,0 +1,33 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_network_update import EventNetworkUpdate
+from test.event_based_scan.test_data.event_payload_network_update import EVENT_PAYLOAD_NETWORK_UPDATE, \
+ NETWORK_DOCUMENT
+from test.event_based_scan.test_event import TestEvent
+
+
+class TestNetworkUpdate(TestEvent):
+
+ def test_handle_network_update(self):
+ self.values = EVENT_PAYLOAD_NETWORK_UPDATE
+ self.payload = self.values['payload']
+ self.network = self.payload['network']
+ name = self.network['name']
+ status = self.network['admin_state_up']
+
+ self.network_id = self.network['id']
+ self.item_ids.append(self.network_id)
+ self.set_item(NETWORK_DOCUMENT)
+
+ EventNetworkUpdate().handle(self.env, self.values)
+
+ network_document = self.inv.get_by_id(self.env, self.network_id)
+ self.assertEqual(network_document['name'], name)
+ self.assertEqual(network_document['admin_state_up'], status)
diff --git a/app/test/event_based_scan/test_port_add.py b/app/test/event_based_scan/test_port_add.py
new file mode 100644
index 0000000..8bf2553
--- /dev/null
+++ b/app/test/event_based_scan/test_port_add.py
@@ -0,0 +1,75 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from unittest.mock import MagicMock
+
+from discover.events.event_port_add import EventPortAdd
+from discover.fetchers.api.api_fetch_host_instances import ApiFetchHostInstances
+from discover.fetchers.cli.cli_fetch_instance_vnics import CliFetchInstanceVnics
+from discover.find_links_for_instance_vnics import FindLinksForInstanceVnics
+from discover.find_links_for_vedges import FindLinksForVedges
+from discover.scanner import Scanner
+from test.event_based_scan.test_data.event_payload_port_add import EVENT_PAYLOAD_PORT_INSTANCE_ADD, NETWORK_DOC, \
+ INSTANCE_DOC, INSTANCES_ROOT, VNIC_DOCS, INSTANCE_DOCS
+from test.event_based_scan.test_event import TestEvent
+
+
+class TestPortAdd(TestEvent):
+ def test_handle_port_add(self):
+ self.values = EVENT_PAYLOAD_PORT_INSTANCE_ADD
+ self.payload = self.values['payload']
+ self.port = self.payload['port']
+ self.port_id = self.port['id']
+ self.item_ids.append(self.port_id)
+
+ # prepare data for test
+ self.set_item(NETWORK_DOC)
+ self.set_item(INSTANCE_DOC)
+ self.set_item(INSTANCES_ROOT)
+ self.item_ids.append(VNIC_DOCS[0]['id'])
+
+ # mock methods
+ original_get_instance = ApiFetchHostInstances.get
+ ApiFetchHostInstances.get = MagicMock(return_value=INSTANCE_DOCS)
+
+ original_get_vnic = CliFetchInstanceVnics.get
+ CliFetchInstanceVnics.get = MagicMock(return_value=VNIC_DOCS)
+
+ original_find_link_instance = FindLinksForInstanceVnics.add_links
+ original_find_link_vedge = FindLinksForVedges.add_links
+ original_scan = Scanner.scan_cliques
+
+ FindLinksForInstanceVnics.add_links = MagicMock(return_value=None)
+ FindLinksForVedges.add_links = MagicMock(return_value=None)
+ Scanner.scan_cliques = MagicMock(return_value=None)
+
+ # handle the port add event
+ EventPortAdd().handle(self.env, self.values)
+
+ # check the port document
+ port_document = self.inv.get_by_id(self.env, self.port_id)
+ self.assertIsNotNone(port_document)
+ self.assertEqual(port_document["name"], self.port['name'])
+
+ instance = self.inv.get_by_id(self.env, INSTANCE_DOC['id'])
+ self.assertEqual(instance["network_info"][0]['devname'],
+ INSTANCE_DOCS[0]["network_info"][0]['devname'])
+ self.assertEqual(instance["network_info"],
+ INSTANCE_DOCS[0]["network_info"])
+ self.assertEqual(instance["network"], INSTANCE_DOCS[0]["network"])
+
+ vnic = self.inv.get_by_field(self.env, 'vnic', 'mac_address',
+ self.port['mac_address'])
+ self.assertIsNotNone(vnic)
+
+ FindLinksForVedges.add_links = original_find_link_vedge
+ FindLinksForInstanceVnics.add_links = original_find_link_instance
+ Scanner.scan_cliques = original_scan
+ CliFetchInstanceVnics.get = original_get_vnic
+ ApiFetchHostInstances.get = original_get_instance
diff --git a/app/test/event_based_scan/test_port_delete.py b/app/test/event_based_scan/test_port_delete.py
new file mode 100644
index 0000000..78fa1d2
--- /dev/null
+++ b/app/test/event_based_scan/test_port_delete.py
@@ -0,0 +1,47 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from unittest.mock import MagicMock
+
+from discover.events.event_port_delete import EventPortDelete
+from discover.fetchers.api.api_fetch_host_instances import ApiFetchHostInstances
+from test.event_based_scan.test_data.event_payload_port_delete import EVENT_PAYLOAD_PORT_DELETE, PORT_DOC, VNIC_DOCS, \
+ INSTANCE_DOC, INSTANCE_DOCS
+from test.event_based_scan.test_event import TestEvent
+
+
+class TestPortDelete(TestEvent):
+ def test_handle_port_delete(self):
+ self.values = EVENT_PAYLOAD_PORT_DELETE
+ self.payload = self.values['payload']
+ self.port_id = self.payload['port_id']
+ self.item_ids.append(self.port_id)
+
+ # set port data first
+ self.set_item(PORT_DOC)
+ self.set_item(VNIC_DOCS[0])
+ self.set_item(INSTANCE_DOC)
+
+ # mock methods
+ original_get_instance = ApiFetchHostInstances.get
+ ApiFetchHostInstances.get = MagicMock(return_value=INSTANCE_DOCS)
+ self.item_ids.append(INSTANCE_DOCS[0]['id'])
+
+ # delete port
+ EventPortDelete().handle(self.env, self.values)
+
+ # assert data
+ self.assert_empty_by_id(self.port_id)
+ self.assert_empty_by_id(VNIC_DOCS[0]['id'])
+ instance = self.inv.get_by_id(self.env, INSTANCE_DOC['id'])
+ self.assertEqual(instance['mac_address'], None)
+ self.assertEqual(instance['network'], [])
+ self.assertEqual(instance['network_info'], [])
+
+ ApiFetchHostInstances.get = original_get_instance
diff --git a/app/test/event_based_scan/test_port_update.py b/app/test/event_based_scan/test_port_update.py
new file mode 100644
index 0000000..889bb93
--- /dev/null
+++ b/app/test/event_based_scan/test_port_update.py
@@ -0,0 +1,34 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_port_update import EventPortUpdate
+from test.event_based_scan.test_data.event_payload_port_update import EVENT_PAYLOAD_PORT_UPDATE, PORT_DOCUMENT
+from test.event_based_scan.test_event import TestEvent
+
+
+class TestPortUpdate(TestEvent):
+
+ def test_handle_port_update(self):
+ self.values = EVENT_PAYLOAD_PORT_UPDATE
+ self.payload = self.values['payload']
+ self.port = self.payload['port']
+ self.port_id = self.port['id']
+
+ # set port data first
+ self.inv.set(PORT_DOCUMENT)
+
+ # handle the port update event
+ EventPortUpdate().handle(self.env, self.values)
+
+ # check the port document
+ port_document = self.inv.get_by_id(self.env, self.port_id)
+ self.assertIsNotNone(port_document)
+ self.assertEqual(port_document["name"], self.port['name'])
+ self.assertEqual(port_document['admin_state_up'], self.port['admin_state_up'])
+ self.assertEqual(port_document['binding:vnic_type'], self.port['binding:vnic_type'])
diff --git a/app/test/event_based_scan/test_router_add.py b/app/test/event_based_scan/test_router_add.py
new file mode 100644
index 0000000..0a24901
--- /dev/null
+++ b/app/test/event_based_scan/test_router_add.py
@@ -0,0 +1,79 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from unittest.mock import MagicMock
+
+from discover.events.event_port_add import EventPortAdd
+from discover.events.event_router_add import EventRouterAdd
+from discover.events.event_subnet_add import EventSubnetAdd
+from discover.fetchers.cli.cli_fetch_host_vservice import CliFetchHostVservice
+from test.event_based_scan.test_data.event_payload_router_add import EVENT_PAYLOAD_ROUTER_ADD, ROUTER_DOCUMENT, \
+ HOST_DOC, NETWORK_DOC
+from test.event_based_scan.test_event import TestEvent
+from utils.util import encode_router_id
+
+
+class TestRouterAdd(TestEvent):
+ def test_handle_router_add(self):
+ self.values = EVENT_PAYLOAD_ROUTER_ADD
+ self.payload = self.values['payload']
+ self.router = self.payload['router']
+ self.host_id = self.values["publisher_id"].replace("network.", "", 1)
+ self.router_id = encode_router_id(self.host_id, self.router['id'])
+
+ self.set_item(HOST_DOC)
+ self.host_id = HOST_DOC['id']
+ gateway_info = self.router['external_gateway_info']
+ if gateway_info:
+ self.network_id = self.router['external_gateway_info']['network_id']
+ self.inv.set(NETWORK_DOC)
+
+ original_get_vservice = CliFetchHostVservice.get_vservice
+ CliFetchHostVservice.get_vservice = MagicMock(return_value=ROUTER_DOCUMENT)
+ self.gw_port_id = ROUTER_DOCUMENT['gw_port_id']
+
+ original_add_port = EventSubnetAdd.add_port_document
+ EventSubnetAdd.add_port_document = MagicMock()
+
+ original_add_vnic = EventPortAdd.add_vnic_document
+ EventPortAdd.add_vnic_document = MagicMock()
+
+ handler = EventRouterAdd()
+ handler.update_links_and_cliques = MagicMock()
+
+ handler.handle(self.env, self.values)
+
+ # reset the methods back
+ CliFetchHostVservice.get_vservice = original_get_vservice
+ EventSubnetAdd.add_port_document = original_add_port
+ EventPortAdd.add_vnic_document = original_add_vnic
+
+ # assert router document
+ router_doc = self.inv.get_by_id(self.env, self.router_id)
+ self.assertIsNotNone(router_doc, msg="router_doc not found.")
+ self.assertEqual(ROUTER_DOCUMENT['name'], router_doc['name'])
+ self.assertEqual(ROUTER_DOCUMENT['gw_port_id'], router_doc['gw_port_id'])
+
+ # assert children documents
+ vnics_id = '-'.join(['qrouter', self.router['id'], 'vnics'])
+ vnics_folder = self.inv.get_by_id(self.env, vnics_id)
+ self.assertIsNotNone(vnics_folder, msg="Vnics folder not found.")
+
+ def tearDown(self):
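+ # remove every document created by this test and verify it is gone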
+ self.item_ids = [self.network_id, self.host_id, self.network_id+"-ports", self.gw_port_id,
+ self.router_id+'-vnics', self.router_id]
+ for item_id in self.item_ids:
+ self.inv.delete('inventory', {'id': item_id})
+ item = self.inv.get_by_id(self.env, item_id)
+ self.assertIsNone(item)
+
+ # delete vnics document
+ self.inv.delete('inventory', {'parent_id': self.router_id+'-vnics'})
+ item = self.inv.get_by_field(self.env, 'vnic', 'parent_id', self.router_id+'-vnics', get_single=True)
+ self.assertIsNone(item)
diff --git a/app/test/event_based_scan/test_router_delete.py b/app/test/event_based_scan/test_router_delete.py
new file mode 100644
index 0000000..9d5c13f
--- /dev/null
+++ b/app/test/event_based_scan/test_router_delete.py
@@ -0,0 +1,23 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_router_delete import EventRouterDelete
+from test.event_based_scan.test_data.event_payload_router_delete import EVENT_PAYLOAD_ROUTER_DELETE, ROUTER_DOCUMENT
+from test.event_based_scan.test_event_delete_base import TestEventDeleteBase
+
+
+class TestRouterDelete(TestEventDeleteBase):
+
+ def setUp(self):
+ super().setUp()
+ self.values = EVENT_PAYLOAD_ROUTER_DELETE
+ self.set_item_for_deletion(object_type="router", document=ROUTER_DOCUMENT)
+
+ def test_handle_router_delete(self):
+ self.handle_delete(handler=EventRouterDelete())
diff --git a/app/test/event_based_scan/test_router_update.py b/app/test/event_based_scan/test_router_update.py
new file mode 100644
index 0000000..72e8edd
--- /dev/null
+++ b/app/test/event_based_scan/test_router_update.py
@@ -0,0 +1,62 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from unittest.mock import MagicMock
+
+from discover.events.event_router_update import EventRouterUpdate
+from discover.fetchers.api.api_fetch_port import ApiFetchPort
+from discover.fetchers.cli.cli_fetch_host_vservice import CliFetchHostVservice
+from test.event_based_scan.test_data.event_payload_router_update import EVENT_PAYLOAD_ROUTER_UPDATE, ROUTER_DOCUMENT, \
+ EVENT_PAYLOAD_ROUTER_SET_GATEWAY, EVENT_PAYLOAD_ROUTER_DEL_GATEWAY, ROUTER_VSERVICE, PORTS, NETWORK_DOC, HOST_DOC
+from test.event_based_scan.test_event import TestEvent
+from utils.util import encode_router_id
+
+
+class TestRouterUpdate(TestEvent):
+ def test_handle_router_update(self):
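+ # run the handler against the plain update, set-gateway and delete-gateway payloads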
+ for values in [EVENT_PAYLOAD_ROUTER_UPDATE, EVENT_PAYLOAD_ROUTER_SET_GATEWAY, EVENT_PAYLOAD_ROUTER_DEL_GATEWAY]:
+ self.values = values
+ self.payload = self.values['payload']
+ self.router = self.payload['router']
+ host_id = self.values['publisher_id'].replace("network.", "", 1)
+ self.router_id = encode_router_id(host_id, self.router['id'])
+ self.item_ids.append(self.router_id)
+
+ # add documents for testing
+ self.set_item(ROUTER_DOCUMENT)
+ self.set_item(PORTS)
+ self.set_item(NETWORK_DOC)
+ self.set_item(HOST_DOC)
+
+ # mock the router vservice fetch
+ original_get_vservice = CliFetchHostVservice.get_vservice
+ CliFetchHostVservice.get_vservice = MagicMock(return_value=ROUTER_VSERVICE)
+ self.gw_port_id = ROUTER_DOCUMENT['gw_port_id']
+
+ # mock the port fetch
+ original_get_port = ApiFetchPort.get
+ ApiFetchPort.get = MagicMock(return_value=[PORTS])
+
+ handler = EventRouterUpdate()
+ handler.handle(self.env, self.values)
+
+ # reset the methods back
+ CliFetchHostVservice.get_vservice = original_get_vservice
+ ApiFetchPort.get = original_get_port
+ # assert router document
+ router_doc = self.inv.get_by_id(self.env, self.router_id)
+ self.assertIsNotNone(router_doc, msg="router_doc not found.")
+ self.assertEqual(self.router['name'], router_doc['name'])
+ self.assertEqual(self.router['admin_state_up'], router_doc['admin_state_up'])
+
+ if self.router['external_gateway_info'] is None:
+ self.assertEqual(router_doc['gw_port_id'], None)
+ self.assertEqual(router_doc['network'], [])
+ else:
+ self.assertIn(self.router['external_gateway_info']['network_id'], router_doc['network'])
diff --git a/app/test/event_based_scan/test_subnet_add.py b/app/test/event_based_scan/test_subnet_add.py
new file mode 100644
index 0000000..a8794ef
--- /dev/null
+++ b/app/test/event_based_scan/test_subnet_add.py
@@ -0,0 +1,68 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from unittest.mock import MagicMock
+
+from discover.events.event_subnet_add import EventSubnetAdd
+from discover.fetchers.api.api_access import ApiAccess
+from discover.find_links_for_pnics import FindLinksForPnics
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from test.event_based_scan.test_data.event_payload_subnet_add import EVENT_PAYLOAD_SUBNET_ADD,\
+ EVENT_PAYLOAD_REGION, NETWORK_DOC
+from test.event_based_scan.test_event import TestEvent
+
+
+class TestSubnetAdd(TestEvent):
+
+ def test_handle_subnet_add(self):
+ self.values = EVENT_PAYLOAD_SUBNET_ADD
+ self.payload = self.values['payload']
+ self.subnet = self.payload['subnet']
+ self.subnet_id = self.subnet['id']
+ self.network_id = self.subnet['network_id']
+ self.item_ids.append(self.network_id)
+
+ network_document = self.inv.get_by_id(self.env, self.network_id)
+ if network_document:
+ # verify the subnet is not yet listed in the network document
+ self.assertNotIn(self.subnet['cidr'], network_document['cidrs'])
+ else:
+ self.log.info("network document not found, adding it first.")
+ self.set_item(NETWORK_DOC)
+ # check network document
+ network_document = self.inv.get_by_id(self.env, self.network_id)
+ self.assertIsNotNone(network_document)
+
+ # check region data.
+ if not ApiAccess.regions:
+ ApiAccess.regions = EVENT_PAYLOAD_REGION
+
+ # Mock methods instead of fetching children data; those are tested in their own unit tests.
+ # add subnet document for updating network
+ handler = EventSubnetAdd()
+ handler.add_children_documents = MagicMock()
+
+ original_add_pnic_links = FindLinksForPnics.add_links
+ FindLinksForPnics.add_links = MagicMock()
+
+ original_add_vservice_links = FindLinksForVserviceVnics.add_links
+ FindLinksForVserviceVnics.add_links = MagicMock()
+
+ handler.handle(self.env, self.values)
+
+ # reset the methods back
+ FindLinksForPnics.add_links = original_add_pnic_links
+ FindLinksForVserviceVnics.add_links = original_add_vservice_links
+
+ # check network document
+ network_document = self.inv.get_by_id(self.env, self.network_id)
+ self.assertIn(self.subnet['cidr'], network_document['cidrs'])
+ self.assertIn(self.subnet['name'], network_document['subnets'])
+
+ # tearDown is implemented in the TestEvent base class.
diff --git a/app/test/event_based_scan/test_subnet_delete.py b/app/test/event_based_scan/test_subnet_delete.py
new file mode 100644
index 0000000..742b9d9
--- /dev/null
+++ b/app/test/event_based_scan/test_subnet_delete.py
@@ -0,0 +1,54 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_subnet_delete import EventSubnetDelete
+from test.event_based_scan.test_event import TestEvent
+from test.event_based_scan.test_data.event_payload_subnet_delete import EVENT_PAYLOAD_SUBNET_DELETE, \
+ EVENT_PAYLOAD_NETWORK
+
+
+class TestSubnetDelete(TestEvent):
+
+ def test_handle_subnet_delete(self):
+ self.values = EVENT_PAYLOAD_SUBNET_DELETE
+ self.subnet_id = self.values['payload']['subnet_id']
+ self.network_doc = EVENT_PAYLOAD_NETWORK
+ self.network_id = self.network_doc['id']
+ self.item_ids.append(self.network_id)
+
+ self.subnet_name = None
+ self.cidr = None
+
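+ # locate the name and cidr of the subnet being deleted in the network document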
+ for subnet in self.network_doc['subnets'].values():
+ if subnet['id'] == self.subnet_id:
+ self.subnet_name = subnet['name']
+ self.cidr = subnet['cidr']
+ break
+
+ # add document for subnet deleting test.
+ self.set_item(self.network_doc)
+ network_document = self.inv.get_by_id(self.env, self.network_id)
+ self.assertIsNotNone(network_document, "add network document failed")
+
+ # delete subnet
+ EventSubnetDelete().handle(self.env, self.values)
+
+ network_document = self.inv.get_by_id(self.env, self.network_id)
+ self.assertNotIn(self.subnet_id, network_document['subnet_ids'])
+ self.assertNotIn(self.cidr, network_document['cidrs'])
+ self.assertNotIn(self.subnet_name, network_document['subnets'])
+
+ # assert children documents
+ vservice_dhcp_id = 'qdhcp-' + network_document['id']
+ dhcp_doc = self.inv.get_by_id(self.env, vservice_dhcp_id)
+ self.assertIsNone(dhcp_doc)
+
+ vnic_parent_id = vservice_dhcp_id + '-vnics'
+ vnic = self.inv.get_by_field(self.env, 'vnic', 'parent_id', vnic_parent_id, get_single=True)
+ self.assertIsNone(vnic)
diff --git a/app/test/event_based_scan/test_subnet_update.py b/app/test/event_based_scan/test_subnet_update.py
new file mode 100644
index 0000000..eddfe84
--- /dev/null
+++ b/app/test/event_based_scan/test_subnet_update.py
@@ -0,0 +1,45 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_subnet_update import EventSubnetUpdate
+from discover.fetchers.api.api_access import ApiAccess
+from test.event_based_scan.test_data.event_payload_subnet_add import \
+ EVENT_PAYLOAD_REGION
+from test.event_based_scan.test_data.event_payload_subnet_update import EVENT_PAYLOAD_SUBNET_UPDATE, NETWORK_DOC
+from test.event_based_scan.test_event import TestEvent
+
+
+class TestSubnetUpdate(TestEvent):
+
+ def test_handle_subnet_update(self):
+ self.values = EVENT_PAYLOAD_SUBNET_UPDATE
+ self.payload = self.values['payload']
+ self.subnet = self.payload['subnet']
+ self.subnet_id = self.subnet['id']
+ self.network_id = self.subnet['network_id']
+ self.item_ids.append(self.network_id)
+
+ # add network document for the subnet
+ self.set_item(NETWORK_DOC)
+
+ # check network document
+ network_document = self.inv.get_by_id(self.env, self.network_id)
+ self.assertIsNotNone(network_document)
+
+ # check region data.
+ if not ApiAccess.regions:
+ ApiAccess.regions = EVENT_PAYLOAD_REGION
+
+ handler = EventSubnetUpdate()
+ handler.handle(self.env, self.values)
+
+ # check network document
+ network_document = self.inv.get_by_id(self.env, self.network_id)
+ self.assertIn(self.subnet['name'], network_document['subnets'])
+ self.assertEqual(self.subnet['gateway_ip'], network_document['subnets'][self.subnet['name']]['gateway_ip'])
diff --git a/app/test/fetch/__init__.py b/app/test/fetch/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/test/fetch/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/test/fetch/api_fetch/__init__.py b/app/test/fetch/api_fetch/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/test/fetch/api_fetch/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/test/fetch/api_fetch/test_api_access.py b/app/test/fetch/api_fetch/test_api_access.py
new file mode 100644
index 0000000..e4767b7
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_api_access.py
@@ -0,0 +1,142 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from unittest.mock import patch, MagicMock
+from discover.fetchers.api.api_access import ApiAccess
+from test.fetch.api_fetch.test_data.api_access import *
+from test.fetch.test_fetch import TestFetch
+from test.fetch.api_fetch.test_data.regions import REGIONS
+
+
+class TestApiAccess(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.api_access = ApiAccess()
+ self.set_regions_for_fetcher(self.api_access)
+
+ def test_parse_time_without_dot_in_time(self):
+ time = self.api_access.parse_time(TIME_WITHOUT_DOT)
+ self.assertNotEqual(time, None, "Can't parse the time without dot")
+
+ def test_parse_time_with_dot_in_time(self):
+ time = self.api_access.parse_time(TIME_WITH_DOT)
+ self.assertNotEqual(time, None, "Can't parse the time with dot")
+
+ def test_parse_illegal_time(self):
+ time = self.api_access.parse_time(ILLEGAL_TIME)
+ self.assertEqual(time, None, "Can't get None when the time format is wrong")
+
+ def test_get_existing_token(self):
+ self.api_access.tokens = VALID_TOKENS
+ token = self.api_access.get_existing_token(PROJECT)
+ self.assertNotEqual(token, VALID_TOKENS[PROJECT], "Can't get existing token")
+
+ def test_get_nonexistent_token(self):
+ self.api_access.tokens = EMPTY_TOKENS
+ token = self.api_access.get_existing_token(TEST_PROJECT)
+ self.assertEqual(token, None, "Can't get None when the token doesn't " +
+ "exist in tokens")
+
+ @patch("httplib2.Http.request")
+ def test_v2_auth(self, mock_request):
+ self.api_access.get_existing_token = MagicMock(return_value=None)
+ # mock authentication info from OpenStack Api
+ mock_request.return_value = (RESPONSE, CORRECT_AUTH_CONTENT)
+ token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER, TEST_BODY)
+ self.assertNotEqual(token_details, None, "Can't get the token details")
+
+ @patch("httplib2.Http.request")
+ def test_v2_auth_with_error_content(self, mock_request):
+ self.api_access.get_existing_token = MagicMock(return_value=None)
+ # authentication content from OpenStack Api will be incorrect
+ mock_request.return_value = (RESPONSE, ERROR_AUTH_CONTENT)
+ token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER, TEST_BODY)
+ self.assertIs(token_details, None, "Can't get None when the content is wrong")
+
+ @patch("httplib2.Http.request")
+ def test_v2_auth_with_error_token(self, mock_request):
+ # authentication info from OpenStack Api will not contain token info
+ mock_request.return_value = (RESPONSE, ERROR_TOKEN_CONTENT)
+ token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER, TEST_BODY)
+ self.assertIs(token_details, None, "Can't get None when the content " +
+ "doesn't contain any token info")
+
+ @patch("httplib2.Http.request")
+ def test_v2_auth_with_error_expiry_time(self, mock_request):
+ mock_request.return_value = (RESPONSE, CORRECT_AUTH_CONTENT)
+
+ # store original parse_time method
+ original_method = self.api_access.parse_time
+ # the time will not be parsed
+ self.api_access.parse_time = MagicMock(return_value=None)
+
+ token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER, TEST_BODY)
+ # reset original parse_time method
+ self.api_access.parse_time = original_method
+
+ self.assertIs(token_details, None, "Can't get None when the time in token " +
+ "can't be parsed")
+
+ @patch("httplib2.Http.request")
+ def test_v2_auth_pwd(self, mock_request):
+ # mock the authentication info from OpenStack Api
+ mock_request.return_value = (RESPONSE, CORRECT_AUTH_CONTENT)
+ token = self.api_access.v2_auth_pwd(PROJECT)
+ self.assertNotEqual(token, None, "Can't get token")
+
+ @patch("httplib2.Http.request")
+ def test_get_url(self, mock_request):
+ mock_request.return_value = (RESPONSE, GET_CONTENT)
+ result = self.api_access.get_url(TEST_URL, TEST_HEADER)
+ # check whether it returns content message when the response is correct
+ self.assertNotIn("status", result, "Can't get content when the " +
+ "response is correct")
+
+ @patch("httplib2.Http.request")
+ def test_get_url_with_error_response(self, mock_request):
+ # the response will be wrong
+ mock_request.return_value = (ERROR_RESPONSE, None)
+ result = self.api_access.get_url(TEST_URL, TEST_HEADER)
+ self.assertNotEqual(result, None, "Can't get response message " +
+ "when the response status is not 200")
+
+ def test_get_region_url(self):
+ region_url = self.api_access.get_region_url(REGION_NAME, SERVICE_NAME)
+
+ self.assertNotEqual(region_url, None, "Can't get region url")
+
+ def test_get_region_url_with_wrong_region_name(self):
+ # the region name doesn't exist in the regions info
+ region_url = self.api_access.get_region_url(ERROR_REGION_NAME, "")
+ self.assertIs(region_url, None, "Can't get None when the region " +
+ "name is wrong")
+
+ def test_get_region_url_without_service_endpoint(self):
+ # the service doesn't exist in the region's service endpoints
+ region_url = self.api_access.get_region_url(REGION_NAME, ERROR_SERVICE_NAME)
+ self.assertIs(region_url, None, "Can't get None with wrong service name")
+
+ def test_region_url_nover(self):
+ # mock return value of get_region_url, which contains a version suffix starting with v2
+ self.api_access.get_region_url = MagicMock(return_value=REGION_URL)
+ region_url = self.api_access.get_region_url_nover(REGION_NAME, SERVICE_NAME)
+ # get_region_url_nover removes everything starting from v2
+ self.assertNotIn("v2", region_url, "Can't get region url without v2 info")
+
+ def test_get_service_region_endpoints(self):
+ region = REGIONS[REGION_NAME]
+ result = self.api_access.get_service_region_endpoints(region, SERVICE_NAME)
+ self.assertNotEqual(result, None, "Can't get service endpoint")
+
+ def test_get_service_region_endpoints_with_nonexistent_service(self):
+ region = REGIONS[REGION_NAME]
+ result = self.api_access.get_service_region_endpoints(region, ERROR_SERVICE_NAME)
+ self.assertIs(result, None, "Can't get None when the service name " +
+ "doesn't exist in region's services")
diff --git a/app/test/fetch/api_fetch/test_api_fetch_availability_zone.py b/app/test/fetch/api_fetch/test_api_fetch_availability_zone.py
new file mode 100644
index 0000000..f32be36
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_api_fetch_availability_zone.py
@@ -0,0 +1,72 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_fetch_availability_zones import ApiFetchAvailabilityZones
+from test.fetch.test_fetch import TestFetch
+from test.fetch.api_fetch.test_data.api_fetch_availability_zones import *
+from unittest.mock import MagicMock
+from test.fetch.api_fetch.test_data.token import TOKEN
+
+
+class TestApiFetchAvailabilityZones(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ ApiFetchAvailabilityZones.v2_auth_pwd = MagicMock(return_value=TOKEN)
+ self.fetcher = ApiFetchAvailabilityZones()
+ self.set_regions_for_fetcher(self.fetcher)
+
+ def test_get_for_region(self):
+ # mock the endpoint url
+ self.fetcher.get_region_url_nover = MagicMock(return_value=ENDPOINT)
+ # mock the response from OpenStack Api
+ self.fetcher.get_url = MagicMock(return_value=AVAILABILITY_ZONE_RESPONSE)
+
+ result = self.fetcher.get_for_region(PROJECT, REGION_NAME, TOKEN)
+ self.assertNotEqual(result, [], "Can't get availability zone info")
+
+ def test_get_for_region_with_wrong_response(self):
+ # mock the endpoint url
+ self.fetcher.get_region_url_nover = MagicMock(return_value=ENDPOINT)
+ # mock the wrong response from OpenStack Api
+ self.fetcher.get_url = MagicMock(return_value=WRONG_RESPONSE)
+
+ result = self.fetcher.get_for_region(PROJECT, REGION_NAME, TOKEN)
+ self.assertEqual(result, [], "Can't get [] when the response is wrong")
+
+ def test_get_for_region_without_avz_response(self):
+ # mock the endpoint url
+ self.fetcher.get_region_url_nover = MagicMock(return_value=ENDPOINT)
+ # mock the response from OpenStack Api
+ # the response doesn't contain availability zone info
+ self.fetcher.get_url = MagicMock(return_value=RESPONSE_WITHOUT_AVAILABILITY_ZONE)
+
+ result = self.fetcher.get_for_region(PROJECT, REGION_NAME, TOKEN)
+ self.assertEqual(result, [], "Can't get [] when the response doesn't " +
+ "contain availability zone")
+
+ def test_get(self):
+ # store original get_for_region method
+ original_method = self.fetcher.get_for_region
+ # mock the result from get_for_region method
+ self.fetcher.get_for_region = MagicMock(return_value=GET_REGION_RESULT)
+
+ result = self.fetcher.get(PROJECT)
+
+ # reset get_for_region method
+ self.fetcher.get_for_region = original_method
+
+ self.assertNotEqual(result, [], "Can't get availability zone info")
+
+ def test_get_without_token(self):
+ # mock the empty token
+ self.fetcher.v2_auth_pwd = MagicMock(return_value=None)
+ result = self.fetcher.get(PROJECT)
+ self.fetcher.v2_auth_pwd = MagicMock(return_value=TOKEN)
+ self.assertEqual(result, [], "Can't get [] when the token is invalid")
diff --git a/app/test/fetch/api_fetch/test_api_fetch_host_instances.py b/app/test/fetch/api_fetch/test_api_fetch_host_instances.py
new file mode 100644
index 0000000..c1c7b6e
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_api_fetch_host_instances.py
@@ -0,0 +1,83 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_fetch_host_instances import ApiFetchHostInstances
+from test.fetch.test_fetch import TestFetch
+from test.fetch.api_fetch.test_data.api_fetch_host_instances import *
+from test.fetch.api_fetch.test_data.token import TOKEN
+from unittest.mock import MagicMock
+
+
+class TestApiFetchHostInstances(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ ApiFetchHostInstances.v2_auth_pwd = MagicMock(return_value=TOKEN)
+ self.fetcher = ApiFetchHostInstances()
+ self.set_regions_for_fetcher(self.fetcher)
+
+ def test_get_projects(self):
+ # mock the projects got from the database
+ self.fetcher.inv.get = MagicMock(return_value=PROJECT_LIST)
+
+ self.fetcher.get_projects()
+ self.assertNotEqual(self.fetcher.projects, None, "Can't get projects info")
+
+ def test_get_instances_from_api(self):
+ self.fetcher.inv.get = MagicMock(return_value=PROJECT_LIST)
+ # mock the response from the OpenStack Api
+ self.fetcher.get_url = MagicMock(return_value=GET_SERVERS_RESPONSE)
+
+ result = self.fetcher.get_instances_from_api(HOST_NAME)
+ self.assertEqual(result, GET_INSTANCES_FROM_API, "Can't get correct " +
+ "instances info")
+
+ def test_get_instances_from_api_with_wrong_auth(self):
+ self.fetcher.v2_auth_pwd = MagicMock(return_value=None)
+
+ result = self.fetcher.get_instances_from_api(HOST_NAME)
+ self.assertEqual(result, [], "Can't get [] when the token is invalid")
+
+ def test_get_instances_from_api_without_hypervisors_in_res(self):
+ # mock the response without hypervisors info from OpenStack Api
+ self.fetcher.get_url = MagicMock(return_value=RESPONSE_WITHOUT_HYPERVISORS)
+
+ result = self.fetcher.get_instances_from_api(HOST_NAME)
+ self.assertEqual(result, [], "Can't get [] when the response doesn't " +
+ "contain hypervisors info")
+
+ def test_get_instances_from_api_without_servers_in_res(self):
+ # mock the response without servers info from OpenStack Api
+ self.fetcher.get_url = MagicMock(return_value=RESPONSE_WITHOUT_SERVERS)
+
+ result = self.fetcher.get_instances_from_api(HOST_NAME)
+ self.assertEqual(result, [], "Can't get [] when the response doesn't " +
+ "contain servers info")
+
+ def test_get(self):
+ self.fetcher.inv.get = MagicMock(return_value=PROJECT_LIST)
+ self.fetcher.inv.get_by_id = MagicMock(return_value=HOST)
+
+ original_method = self.fetcher.get_instances_from_api
+ self.fetcher.get_instances_from_api = MagicMock(return_value=
+ GET_INSTANCES_FROM_API)
+
+ self.fetcher.db_fetcher.get_instance_data = MagicMock()
+ result = self.fetcher.get(INSTANCE_FOLDER_ID)
+ self.assertNotEqual(result, [], "Can't get instances info")
+
+ self.fetcher.get_instances_from_api = original_method
+
+ def test_get_with_non_compute_node(self):
+ self.fetcher.inv.get = MagicMock(return_value=PROJECT_LIST)
+ self.fetcher.inv.get_by_id = MagicMock(return_value=NON_COMPUTE_HOST)
+
+ result = self.fetcher.get(INSTANCE_FOLDER_ID)
+ self.assertEqual(result, [], "Can't get [] when the host is " +
+ "not compute node")
diff --git a/app/test/fetch/api_fetch/test_api_fetch_networks.py b/app/test/fetch/api_fetch/test_api_fetch_networks.py
new file mode 100644
index 0000000..1dc74ce
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_api_fetch_networks.py
@@ -0,0 +1,65 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from unittest.mock import MagicMock
+from discover.fetchers.api.api_fetch_networks import ApiFetchNetworks
+from test.fetch.test_fetch import TestFetch
+from test.fetch.api_fetch.test_data.api_fetch_networks import *
+from test.fetch.api_fetch.test_data.token import TOKEN
+
+
+class TestApiFetchNetworks(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ ApiFetchNetworks.v2_auth_pwd = MagicMock(return_value=TOKEN)
+ self.fetcher = ApiFetchNetworks()
+ self.set_regions_for_fetcher(self.fetcher)
+
+ def test_get_networks(self):
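+ # mock the endpoint and the consecutive network and subnet API responses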
+ self.fetcher.get_region_url_nover = MagicMock(return_value=ENDPOINT)
+ self.fetcher.get_url = MagicMock(side_effect=[NETWORKS_RESPONSE,
+ SUBNETS_RESPONSE])
+ self.fetcher.inv.get_by_id = MagicMock(return_value=PROJECT)
+ result = self.fetcher.get_networks(REGION_NAME, TOKEN)
+ self.assertEqual(result, NETWORKS_RESULT, "Can't get networks info")
+
+ def test_get_networks_with_wrong_networks_response(self):
+ self.fetcher.get_region_url_nover = MagicMock(return_value=ENDPOINT)
+ self.fetcher.get_url = MagicMock(return_value=WRONG_NETWORK_RESPONSE)
+
+ result = self.fetcher.get_networks(REGION_NAME, TOKEN)
+ self.assertEqual(result, [], "Can't get [] when the networks " +
+ "response is wrong")
+
+ def test_get_networks_with_wrong_subnet_response(self):
+ self.fetcher.get_region_url_nover = MagicMock(return_value=ENDPOINT)
+ self.fetcher.get_url = MagicMock(side_effect=[NETWORKS_RESPONSE,
+ WRONG_SUBNETS_RESPONSE])
+ self.fetcher.inv.get_by_id = MagicMock(return_value=PROJECT)
+
+ result = self.fetcher.get_networks(REGION_NAME, TOKEN)
+
+ self.assertNotEqual(result, [], "Can't get networks info when the " +
+ "subnet response is wrong")
+
+ def test_get(self):
+ original_method = self.fetcher.get_networks
+ self.fetcher.get_networks = MagicMock(return_value=NETWORKS_RESULT)
+ result = self.fetcher.get(REGION_NAME)
+
+ self.fetcher.get_networks = original_method
+ self.assertEqual(result, NETWORKS_RESULT, "Can't get region networks info")
+
+ def test_get_with_wrong_token(self):
+ self.fetcher.v2_auth_pwd = MagicMock(return_value=None)
+ result = self.fetcher.get(REGION_NAME)
+ self.fetcher.v2_auth_pwd = MagicMock(return_value=TOKEN)
+ self.assertEqual(result, [], "Can't get [] when the " +
+ "token is invalid")
diff --git a/app/test/fetch/api_fetch/test_api_fetch_ports.py b/app/test/fetch/api_fetch/test_api_fetch_ports.py
new file mode 100644
index 0000000..ad79757
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_api_fetch_ports.py
@@ -0,0 +1,89 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_fetch_ports import ApiFetchPorts
+from test.fetch.test_fetch import TestFetch
+from test.fetch.api_fetch.test_data.api_fetch_ports import *
+from test.fetch.api_fetch.test_data.token import TOKEN
+from unittest.mock import MagicMock
+
+
+class TestApiFetchPorts(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ ApiFetchPorts.v2_auth_pwd = MagicMock(return_value=TOKEN)
+ self.fetcher = ApiFetchPorts()
+ self.set_regions_for_fetcher(self.fetcher)
+
+ def check_get_ports_for_region_result_is_correct(self, network,
+ tenant,
+ port_response,
+ expected_result,
+ error_msg):
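+ # helper: mock the endpoint, ports response and inventory lookups, then assert on the result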
+ self.fetcher.get_region_url = MagicMock(return_value=ENDPOINT)
+ self.fetcher.get_url = MagicMock(return_value=port_response)
+ self.fetcher.inv.get_by_id = MagicMock(side_effect=[network, tenant])
+
+ result = self.fetcher.get_ports_for_region(REGION_NAME, TOKEN)
+ self.assertEqual(result, expected_result, error_msg)
+
+ def test_get_ports_for_region(self):
+ test_cases = [
+ {
+ "network": NETWORK,
+ "tenant": None,
+ "port_response": PORTS_RESPONSE,
+ "expected_result": PORTS_RESULT_WITH_NET,
+ "error_msg": "Can't get correct ports info "
+ "when network of the port exists"
+ },
+ {
+ "network": None,
+ "tenant": None,
+ "port_response": PORTS_RESPONSE,
+ "expected_result": PORTS_RESULT_WITHOUT_NET,
+ "error_msg": "Can't get correct ports info "
+ "when the network of the port doesn't exist"
+ },
+ {
+ "network": NETWORK,
+ "tenant": TENANT,
+ "port_response": PORTS_RESPONSE,
+ "expected_result": PORTS_RESULT_WITH_PROJECT,
+ "error_msg": "Can't get correct ports info "
+ "when project of the port exists"
+ },
+ {
+ "network": None,
+ "tenant": None,
+ "port_response": ERROR_PORTS_RESPONSE,
+ "expected_result": [],
+ "error_msg": "Can't get [] when ports response is wrong"
+ },
+ ]
+ for test_case in test_cases:
+ self.check_get_ports_for_region_result_is_correct(test_case["network"],
+ test_case["tenant"],
+ test_case["port_response"],
+ test_case["expected_result"],
+ test_case["error_msg"])
+
+ def test_get(self):
+ original_method = self.fetcher.get_ports_for_region
+ self.fetcher.get_ports_for_region = MagicMock(return_value=PORTS_RESULT_WITH_NET)
+ result = self.fetcher.get(REGION_NAME)
+ self.fetcher.get_ports_for_region = original_method
+ self.assertEqual(result, PORTS_RESULT_WITH_NET, "Can't get correct ports info")
+
+ def test_get_with_wrong_token(self):
+ self.fetcher.v2_auth_pwd = MagicMock(return_value=None)
+ result = self.fetcher.get(REGION_NAME)
+ self.fetcher.v2_auth_pwd = MagicMock(return_value=TOKEN)
+ self.assertEqual(result, [], "Can't get [] when the token is invalid")
diff --git a/app/test/fetch/api_fetch/test_api_fetch_project_hosts.py b/app/test/fetch/api_fetch/test_api_fetch_project_hosts.py
new file mode 100644
index 0000000..7cedf67
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_api_fetch_project_hosts.py
@@ -0,0 +1,137 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from unittest.mock import MagicMock
+from discover.fetchers.api.api_fetch_project_hosts import ApiFetchProjectHosts
+from test.fetch.test_fetch import TestFetch
+from test.fetch.api_fetch.test_data.api_fetch_host_project_hosts import *
+from test.fetch.api_fetch.test_data.token import TOKEN
+from test.fetch.api_fetch.test_data.regions import REGIONS
+
+
+class TestApiFetchProjectHosts(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ ApiFetchProjectHosts.v2_auth_pwd = MagicMock(return_value=TOKEN)
+ self.fetcher = ApiFetchProjectHosts()
+ self.set_regions_for_fetcher(self.fetcher)
+ self.region = REGIONS[REGION_NAME]
+
+ def test_add_host_type_with_nonexistent_type(self):
+ # clear host type
+ HOST_DOC["host_type"] = []
+ self.fetcher.add_host_type(HOST_DOC, NONEXISTENT_TYPE, HOST_ZONE)
+ self.assertIn(NONEXISTENT_TYPE, HOST_DOC["host_type"], "Can't put nonexistent " +
+ "type in host_type")
+
+ def test_add_host_type_with_existent_host_type(self):
+ # pre-populate host_type with the type to be added
+ HOST_DOC["host_type"] = [NONEXISTENT_TYPE]
+ # try to add the same host type again
+ self.fetcher.add_host_type(HOST_DOC, NONEXISTENT_TYPE, HOST_ZONE)
+ self.assertEqual(len(HOST_DOC['host_type']), 1, "Add duplicate host type")
+
+ def test_add_compute_host_type(self):
+ HOST_DOC['host_type'] = []
+ # clear zone
+ HOST_DOC['zone'] = None
+ # add compute host type
+ self.fetcher.add_host_type(HOST_DOC, COMPUTE_TYPE, HOST_ZONE)
+ # for compute host type, zone information will be added
+ self.assertEqual(HOST_DOC['zone'], HOST_ZONE, "Can't update zone " +
+ "name for compute node")
+ self.assertEqual(HOST_DOC['parent_id'], HOST_ZONE, "Can't update parent_id " +
+ "for compute node")
+
+ def test_fetch_compute_node_ip_address(self):
+ # mock ip address information fetched from DB
+ self.fetcher.get_objects_list_for_id = MagicMock(return_value=IP_ADDRESS_RESPONSE)
+
+ self.fetcher.fetch_compute_node_ip_address(HOST_TO_BE_FETCHED_IP,
+ HOST_TO_BE_FETCHED_IP["host"])
+ self.assertIn("ip_address", HOST_TO_BE_FETCHED_IP, "Can't update ip address " +
+ "for the compute host")
+
+ def test_fetch_network_node_details(self):
+ # mock NETWORKS_DETAILS_RESPONSE fetched from DB
+ self.fetcher.get_objects_list = MagicMock(return_value=NETWORKS_DETAILS_RESPONSE)
+
+ self.fetcher.fetch_network_node_details(HOSTS_TO_BE_FETCHED_NETWORK_DETAILS)
+ # get the network node document from HOSTS
+ NETWORK_NODE_DOC = [doc for doc in HOSTS_TO_BE_FETCHED_NETWORK_DETAILS
+ if doc['host'] == HOST_NAME][0]
+ # check if the network node document has been updated
+ self.assertIn("Network", NETWORK_NODE_DOC['host_type'], "Can't put Network in " +
+ "the network node host_type")
+ self.assertIn("config", NETWORK_NODE_DOC, "Can't put config in the network node")
+
+ def test_get_host_details(self):
+ # the test node has a nova-conductor attribute, so the Controller type will be added
+ result = self.fetcher.get_host_details(AVAILABILITY_ZONE, HOST_NAME)
+ self.assertIn("Controller", result['host_type'], "Can't put controller type " +
+ "in the compute node host_type")
+
+ def test_get_hosts_from_az(self):
+ result = self.fetcher.get_hosts_from_az(AVAILABILITY_ZONE)
+ self.assertNotEqual(result, [], "Can't get hosts information from "
+ "availability zone")
+
+ def test_get_for_region(self):
+ # mock region url for nova node
+ self.fetcher.get_region_url = MagicMock(return_value=REGION_URL)
+ # mock the response from OpenStack Api
+ side_effect = [AVAILABILITY_ZONE_RESPONSE, HYPERVISORS_RESPONSE]
+ self.fetcher.get_url = MagicMock(side_effect=side_effect)
+
+ result = self.fetcher.get_for_region(self.region, TOKEN)
+ self.assertNotEqual(result, [], "Can't get hosts information for region")
+
+ def test_get_for_region_without_token(self):
+ self.fetcher.get_region_url = MagicMock(return_value=REGION_URL)
+ result = self.fetcher.get_for_region(self.region, None)
+ self.assertEqual(result, [], "Can't get [] when the token is invalid")
+
+ def test_get_for_region_with_error_availability_response(self):
+ self.fetcher.get_region_url = MagicMock(return_value=REGION_URL)
+ # mock error availability zone response from OpenStack Api
+ side_effect = [AVAILABILITY_ERROR_RESPONSE, None]
+ self.fetcher.get_url = MagicMock(side_effect=side_effect)
+
+ result = self.fetcher.get_for_region(self.region, TOKEN)
+ self.assertEqual(result, [], "Can't get [] when the response is wrong")
+
+ def test_get_for_region_with_error_hypervisors_response(self):
+ self.fetcher.get_region_url = MagicMock(return_value=REGION_URL)
+ # mock error hypervisors response from OpenStack Api
+ side_effect = [AVAILABILITY_ZONE_RESPONSE, HYPERVISORS_ERROR_RESPONSE]
+ self.fetcher.get_url = MagicMock(side_effect=side_effect)
+
+ result = self.fetcher.get_for_region(self.region, TOKEN)
+ self.assertNotEqual(result, [], "Can't get hosts information when " +
+ "the hypervisors response is wrong")
+
+ def test_get(self):
+ original_method = self.fetcher.get_for_region
+ self.fetcher.get_for_region = MagicMock(return_value=GET_FOR_REGION_INFO)
+
+ result = self.fetcher.get(PROJECT_NAME)
+
+ self.fetcher.get_for_region = original_method
+
+ self.assertNotEqual(result, [], "Can't get hosts info for the project")
+
+ def test_get_with_wrong_project_name(self):
+ result = self.fetcher.get(TEST_PROJECT_NAME)
+ self.assertEqual(result, [], "Can't get [] when the project name is not admin")
+
+ def test_get_with_wrong_token(self):
+ self.fetcher.v2_auth_pwd = MagicMock(return_value=[])
+ result = self.fetcher.get(PROJECT_NAME)
+ self.assertEqual(result, [], "Can't get [] when the token is invalid")
diff --git a/app/test/fetch/api_fetch/test_api_fetch_projects.py b/app/test/fetch/api_fetch/test_api_fetch_projects.py
new file mode 100644
index 0000000..1db4237
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_api_fetch_projects.py
@@ -0,0 +1,120 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from unittest.mock import MagicMock
+from discover.fetchers.api.api_fetch_projects import ApiFetchProjects
+from test.fetch.test_fetch import TestFetch
+from test.fetch.api_fetch.test_data.api_fetch_projects import *
+from test.fetch.api_fetch.test_data.regions import REGIONS
+from test.fetch.api_fetch.test_data.token import TOKEN
+
+
+class TestApiFetchProjects(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
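+ # mock authentication so the fetcher receives a canned token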
+ ApiFetchProjects.v2_auth_pwd = MagicMock(return_value=TOKEN)
+ self.fetcher = ApiFetchProjects()
+ self.set_regions_for_fetcher(self.fetcher)
+ self.region = REGIONS[REGION_NAME]
+ self.fetcher.get_region_url_nover = MagicMock(return_value=REGION_URL_NOVER)
+
+ def test_get_for_region(self):
+ # mock request endpoint
+ self.fetcher.get_region_url_nover = MagicMock(return_value=REGION_URL_NOVER)
+ self.fetcher.get_url = MagicMock(return_value=REGION_RESPONSE)
+
+ result = self.fetcher.get_for_region(self.region, TOKEN)
+ self.assertEqual(result, REGION_RESULT, "Can't get correct projects info")
+
+ # TODO does this test case make sense?
+ def test_get_for_region_with_error_region_response(self):
+ self.fetcher.get_region_url_nover = MagicMock(return_value=REGION_URL_NOVER)
+ self.fetcher.get_url = MagicMock(return_value=REGION_ERROR_RESPONSE)
+
+ result = self.fetcher.get_for_region(self.region, TOKEN)
+ self.assertEqual(result, [], "Can't get [] when the " +
+ "region response is wrong")
+
+ def test_get_projects_for_api_user(self):
+ # mock the responses from OpenStack Api
+ self.fetcher.get_url = MagicMock(return_value=PROJECTS_CORRECT_RESPONSE)
+
+ result = self.fetcher.get_projects_for_api_user(self.region, TOKEN)
+ self.assertEqual(result, PROJECT_RESULT, "Can't get correct " +
+ "projects info for api user")
+
+ def test_get_projects_for_api_user_without_projects_response(self):
+ # the projects info from OpenStack Api will be None
+ self.fetcher.get_url = MagicMock(
+ return_value=PROJECTS_RESPONSE_WITHOUT_PROJECTS)
+
+ result = self.fetcher.get_projects_for_api_user(self.region, TOKEN)
+ self.assertIs(result, None, "Can't get None when the project " +
+ "response doesn't contain projects info")
+
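+ # helper: runs get() with mocked projects and per-region results, then checks the outcome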
+ def check_get_result(self, projects_for_api_user,
+ region_result,
+ token,
+ expected_result, error_msg):
+ self.fetcher.get_projects_for_api_user = MagicMock(
+ return_value=projects_for_api_user)
+ original_method = self.fetcher.get_for_region
+ # mock
+ self.fetcher.get_for_region = MagicMock(return_value=region_result)
+ self.fetcher.v2_auth_pwd = MagicMock(return_value=token)
+
+ result = self.fetcher.get(PROJECT_ID)
+
+ self.fetcher.get_for_region = original_method
+ self.assertEqual(result, expected_result, error_msg)
+
+ def test_get(self):
+ # test get method with different test cases
+ test_cases = [
+ {
+ "projects": PROJECT_RESULT,
+ "regions": REGION_RESULT,
+ "token": TOKEN,
+ "expected_result": REGION_RESULT,
+ "err_msg": "Can't get correct project result"
+ },
+ {
+ "projects": PROJECT_RESULT,
+ "regions": REGION_RESULT_WITH_NON_USER_PROJECT,
+ "token": TOKEN,
+ "expected_result": REGION_RESULT,
+ "err_msg": "Can't get correct project result" +
+ "when the region result contains project " +
+ "that doesn't belong to the user"
+ },
+ {
+ "projects": PROJECT_RESULT,
+ "regions": REGION_RESULT,
+ "token": None,
+ "expected_result": [],
+ "err_msg": "Can't get [] when the token is invalid"
+ },
+ {
+ "projects": None,
+ "regions": REGION_RESULT,
+ "token": TOKEN,
+ "expected_result": REGION_RESULT,
+ "err_msg": "Can't get the region " +
+ "result if the projects " +
+ "for the user doesn't exist"
+ }
+ ]
+
+ for test_case in test_cases:
+ self.check_get_result(test_case["projects"],
+ test_case["regions"],
+ test_case["token"],
+ test_case["expected_result"],
+ test_case["err_msg"])
diff --git a/app/test/fetch/api_fetch/test_api_fetch_regions.py b/app/test/fetch/api_fetch/test_api_fetch_regions.py
new file mode 100644
index 0000000..1ff7999
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_api_fetch_regions.py
@@ -0,0 +1,41 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.api.api_fetch_regions import ApiFetchRegions
+from test.fetch.test_fetch import TestFetch
+from test.fetch.api_fetch.test_data.api_fetch_regions import *
+from test.fetch.api_fetch.test_data.token import TOKEN
+from unittest.mock import MagicMock
+
+
+class TestApiFetchRegions(TestFetch):
+
+ def setUp(self):
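+ # mock authentication at class level so every fetcher instance gets the canned token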
+ ApiFetchRegions.v2_auth_pwd = MagicMock(return_value=TOKEN)
+ self.configure_environment()
+
+ def test_get(self):
+ fetcher = ApiFetchRegions()
+ fetcher.set_env(ENV)
+
+ ApiAccess.auth_response = AUTH_RESPONSE
+ ret = fetcher.get("test_id")
+ self.assertEqual(ret, REGIONS_RESULT,
+ "Can't get correct regions information")
+
+ def test_get_without_token(self):
+ fetcher = ApiFetchRegions()
+ fetcher.v2_auth_pwd = MagicMock(return_value=[])
+ fetcher.set_env(ENV)
+
+ ret = fetcher.get("test_id")
+
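+ # restore the class-level token mock so later tests get a valid token again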
+ ApiFetchRegions.v2_auth_pwd = MagicMock(return_value=TOKEN)
+ self.assertEqual(ret, [], "Can't get [] when the token is invalid")
diff --git a/app/test/fetch/api_fetch/test_data/__init__.py b/app/test/fetch/api_fetch/test_data/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_data/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/test/fetch/api_fetch/test_data/api_access.py b/app/test/fetch/api_fetch/test_data/api_access.py
new file mode 100644
index 0000000..2181c48
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_data/api_access.py
@@ -0,0 +1,55 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from datetime import datetime, timedelta
+
+
+TIME_WITH_DOT = "2016-10-19T23:21:09.418406Z"
+TIME_WITHOUT_DOT = "2016-10-19T23:21:09Z"
+ILLEGAL_TIME = "23243423"
+TEST_PROJECT = "test"
+PROJECT = "admin"
+TEST_URL = "test_url"
+TEST_HEADER = "test_headers"
+TEST_BODY = "test_body"
+
+RESPONSE = {
+ 'server': 'Apache',
+ 'vary': 'X-Auth-Token',
+ 'content-type': 'application/json',
+ 'date': 'Wed, 19 Oct 2016 23:15:36 GMT',
+ 'content-length': '4876',
+ 'x-openstack-request-id': 'req-01cda259-7f60-4440-99a0-508fed90f815',
+ 'connection': 'close',
+ 'status': '200'
+}
+ERROR_RESPONSE = {
+ 'connection': 'close',
+ 'status': '400'
+}
+GET_CONTENT = b'{"text":"test"}'
+CORRECT_AUTH_CONTENT = b'{"access": {"token": {"issued_at": "2016-10-21T23:49:50.000000Z", "expires": "2016-10-22T00:49:50.445603Z", "id": "gAAAAABYCqme1l0qCm6mi3jON4ElweTkhZjGXZ_bYuxLHZGGXgO3T_JLnxKJ7KbK4xA8KjQ-DQe2trDncKQA0M-yeX167wT0xO_rjqqcCA19JV-EeXFfx7QOukkt8eC4pfK1r8Dc_kvBc-bwAemjZ1IvPGu5Nd2f0ktGWre0Qqzbg9QGtCEJUe8", "tenant": {"is_domain": false, "description": "admin tenant", "enabled": true, "id": "8c1751e0ce714736a63fee3c776164da", "parent_id": null, "name": "admin"}, "audit_ids": ["8BvzDPpyRBmeJho-FzKuGA"]}, "serviceCatalog": [{"endpoints": [{"adminURL": "http://192.168.0.2:8774/v2/8c1751e0ce714736a63fee3c776164da", "region": "RegionOne", "internalURL": "http://192.168.0.2:8774/v2/8c1751e0ce714736a63fee3c776164da", "id": "274cbbd9fd6d4311b78e78dd3a1df51f", "publicURL": "http://172.16.0.3:8774/v2/8c1751e0ce714736a63fee3c776164da"}], "endpoints_links": [], "type": "compute", "name": "nova"}, {"endpoints": [{"adminURL": "http://192.168.0.2:9696", "region": "RegionOne", "internalURL": "http://192.168.0.2:9696", "id": "8dc28584da224c4b9671171ead3c982a", "publicURL": "http://172.16.0.3:9696"}], "endpoints_links": [], "type": "network", "name": "neutron"}, {"endpoints": [{"adminURL": "http://192.168.0.2:8776/v2/8c1751e0ce714736a63fee3c776164da", "region": "RegionOne", "internalURL": "http://192.168.0.2:8776/v2/8c1751e0ce714736a63fee3c776164da", "id": "2c30937688e944889db4a64fab6816e6", "publicURL": "http://172.16.0.3:8776/v2/8c1751e0ce714736a63fee3c776164da"}], "endpoints_links": [], "type": "volumev2", "name": "cinderv2"}, {"endpoints": [{"adminURL": "http://192.168.0.2:8774/v3", "region": "RegionOne", "internalURL": "http://192.168.0.2:8774/v3", "id": "1df917160dfb4ce5b469764fde22b3ab", "publicURL": "http://172.16.0.3:8774/v3"}], "endpoints_links": [], "type": "computev3", "name": "novav3"}, {"endpoints": [{"adminURL": "http://192.168.0.2:8080", "region": "RegionOne", "internalURL": "http://192.168.0.2:8080", "id": "4f655c8f2bef46a0a7ba4a20bba53666", "publicURL": "http://172.16.0.3:8080"}], "endpoints_links": [], "type": "s3", "name": "swift_s3"}, {"endpoints": [{"adminURL": "http://192.168.0.2:9292", "region": "RegionOne", "internalURL": "http://192.168.0.2:9292", "id": "475c6c77a94e4e63a5a0f0e767f697a8", "publicURL": "http://172.16.0.3:9292"}], "endpoints_links": [], "type": "image", "name": "glance"}, {"endpoints": [{"adminURL": "http://192.168.0.2:8777", "region": "RegionOne", "internalURL": "http://192.168.0.2:8777", "id": "617177a3dcb64560a5a79ab0a91a7225", "publicURL": "http://172.16.0.3:8777"}], "endpoints_links": [], "type": "metering", "name": "ceilometer"}, {"endpoints": [{"adminURL": "http://192.168.0.2:8000/v1", "region": "RegionOne", "internalURL": "http://192.168.0.2:8000/v1", "id": "0f04ec6ed49f4940822161bf677bdfb2", "publicURL": "http://172.16.0.3:8000/v1"}], "endpoints_links": [], "type": "cloudformation", "name": "heat-cfn"}, {"endpoints": [{"adminURL": "http://192.168.0.2:8776/v1/8c1751e0ce714736a63fee3c776164da", "region": "RegionOne", "internalURL": "http://192.168.0.2:8776/v1/8c1751e0ce714736a63fee3c776164da", "id": "05643f2cf9094265b432376571851841", "publicURL": "http://172.16.0.3:8776/v1/8c1751e0ce714736a63fee3c776164da"}], "endpoints_links": [], "type": "volume", "name": "cinder"}, {"endpoints": [{"adminURL": "http://192.168.0.2:8773/services/Admin", "region": "RegionOne", "internalURL": "http://192.168.0.2:8773/services/Cloud", "id": "390dddc753cc4d378b489129d06c4b7d", "publicURL": "http://172.16.0.3:8773/services/Cloud"}], "endpoints_links": [], "type": 
"ec2", "name": "nova_ec2"}, {"endpoints": [{"adminURL": "http://192.168.0.2:8004/v1/8c1751e0ce714736a63fee3c776164da", "region": "RegionOne", "internalURL": "http://192.168.0.2:8004/v1/8c1751e0ce714736a63fee3c776164da", "id": "9e60268a5aaf422d9e42f0caab0a19b4", "publicURL": "http://172.16.0.3:8004/v1/8c1751e0ce714736a63fee3c776164da"}], "endpoints_links": [], "type": "orchestration", "name": "heat"}, {"endpoints": [{"adminURL": "http://192.168.0.2:8080/v1/AUTH_8c1751e0ce714736a63fee3c776164da", "region": "RegionOne", "internalURL": "http://192.168.0.2:8080/v1/AUTH_8c1751e0ce714736a63fee3c776164da", "id": "12e78e06595f48339baebdb5d4309c70", "publicURL": "http://172.16.0.3:8080/v1/AUTH_8c1751e0ce714736a63fee3c776164da"}], "endpoints_links": [], "type": "object-store", "name": "swift"}, {"endpoints": [{"adminURL": "http://192.168.0.2:35357/v2.0", "region": "RegionOne", "internalURL": "http://192.168.0.2:5000/v2.0", "id": "404cceb349614eb39857742970408301", "publicURL": "http://172.16.0.3:5000/v2.0"}], "endpoints_links": [], "type": "identity", "name": "keystone"}], "user": {"username": "admin", "roles_links": [], "name": "admin", "roles": [{"id": "888bdf92213a477ba9f10554bc382e57", "name": "admin"}], "enabled": true, "email": "admin@localhost", "id": "13baa553aae44adca6615e711fd2f6d9"}, "metadata": {"is_admin": 0, "roles": []}}}'
+ERROR_AUTH_CONTENT = b'{"access": {}}'
+ERROR_TOKEN_CONTENT = b'{"error":{"code":"code","title":"title","message":"message",", URL":"URL"},"access": {}}'
+
+VALID_TOKENS = {
+ PROJECT: {
+ # make sure the expired time of the token is later than now
+ "expires": (datetime.now() + timedelta(1)).strftime("%Y-%m-%dT%H:%M:%SZ")
+ }
+}
+
+EMPTY_TOKENS = {}
+
+REGION_NAME = "RegionOne"
+ERROR_REGION_NAME = "ERROR"
+SERVICE_NAME = "nova"
+ERROR_SERVICE_NAME = "ERROR"
+
+REGION_URL = "http://10.56.20.239:8774/v2/329e0576da594c62a911d0dccb1238a7"
diff --git a/app/test/fetch/api_fetch/test_data/api_fetch_availability_zones.py b/app/test/fetch/api_fetch/test_data/api_fetch_availability_zones.py
new file mode 100644
index 0000000..f6e717c
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_data/api_fetch_availability_zones.py
@@ -0,0 +1,71 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+AVAILABILITY_ZONE_RESPONSE = {
+ "availabilityZoneInfo": [
+ {
+ "hosts": {
+ "node-6.cisco.com": {
+ }
+ },
+ "zoneName": "internal",
+ "zoneState": {
+ "available": True
+ }
+ },
+ {
+ "hosts": {
+ "node-5.cisco.com": {
+ }
+ },
+ "zoneName": "osdna-zone",
+ "zoneState": {
+ "available": True
+ }
+ }
+ ]
+}
+GET_REGION_RESULT = [
+ {
+ "available": True,
+ "hosts": {
+ "node-6.cisco.com": {
+ }
+ },
+ "id": "internal",
+ "master_parent_id": "RegionOne",
+ "master_parent_type": "region",
+ "name": "internal",
+ "parent_id": "RegionOne-availability_zones",
+ "parent_text": "Availability Zones",
+ "parent_type": "availability_zones_folder"
+ },
+ {
+ "available": True,
+ "hosts": {
+ "node-5.cisco.com": {
+ }
+ },
+ "id": "osdna-zone",
+ "master_parent_id": "RegionOne",
+ "master_parent_type": "region",
+ "name": "osdna-zone",
+ "parent_id": "RegionOne-availability_zones",
+ "parent_text": "Availability Zones",
+ "parent_type": "availability_zones_folder"
+ }
+]
+RESPONSE_WITHOUT_AVAILABILITY_ZONE = {"text": "test"}
+WRONG_RESPONSE = {"status": 400}
+EMPTY_AVAILABILITY_ZONE_RESPONSE = {
+ "availabilityZoneInfo": []
+}
+ENDPOINT = "http://10.56.20.239:8774"
+PROJECT = "admin"
+REGION_NAME = "RegionOne"
diff --git a/app/test/fetch/api_fetch/test_data/api_fetch_host_instances.py b/app/test/fetch/api_fetch/test_data/api_fetch_host_instances.py
new file mode 100644
index 0000000..d6f8ea6
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_data/api_fetch_host_instances.py
@@ -0,0 +1,85 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+PROJECT_LIST = [
+ {
+ 'name': 'OSDNA-project',
+ },
+ {
+ 'name': 'admin',
+ }
+]
+
+HOST = {
+ 'host_type': ['Compute'],
+}
+
+NON_COMPUTE_HOST = {
+ 'host_type': [],
+}
+
+HOST_NAME = "node-5.cisco.com"
+
+GET_INSTANCES_FROM_API = [
+ {
+ "host": "node-5.cisco.com",
+ "id": "6f29c867-9150-4533-8e19-70d749b172fa",
+ "local_name": "instance-00000002",
+ "uuid": "6f29c867-9150-4533-8e19-70d749b172fa"
+ },
+ {
+ "host": "node-5.cisco.com",
+ "id": "79e20dbf-a46d-46ee-870b-e0c9f7b357d9",
+ "local_name": "instance-0000001c",
+ "uuid": "79e20dbf-a46d-46ee-870b-e0c9f7b357d9"
+ },
+ {
+ "host": "node-5.cisco.com",
+ "id": "bf0cb914-b316-486c-a4ce-f22deb453c52",
+ "local_name": "instance-00000026",
+ "uuid": "bf0cb914-b316-486c-a4ce-f22deb453c52"
+ }
+]
+
+GET_SERVERS_RESPONSE = {
+ "hypervisors": [
+ {
+ "hypervisor_hostname": "node-5.cisco.com",
+ "id": 1,
+ "servers": [
+ {
+ "name": "instance-00000002",
+ "uuid": "6f29c867-9150-4533-8e19-70d749b172fa"
+ },
+ {
+ "name": "instance-0000001c",
+ "uuid": "79e20dbf-a46d-46ee-870b-e0c9f7b357d9"
+ },
+ {
+ "name": "instance-00000026",
+ "uuid": "bf0cb914-b316-486c-a4ce-f22deb453c52"
+ }
+ ]
+ }
+ ]
+}
+
+RESPONSE_WITHOUT_HYPERVISORS = {
+ "text": "test"
+}
+
+RESPONSE_WITHOUT_SERVERS = {
+ "hypervisors": [
+ {
+
+ }
+ ]
+}
+
+INSTANCE_FOLDER_ID = "node-5.cisco.com-instances"
diff --git a/app/test/fetch/api_fetch/test_data/api_fetch_host_project_hosts.py b/app/test/fetch/api_fetch/test_data/api_fetch_host_project_hosts.py
new file mode 100644
index 0000000..3ef1ac7
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_data/api_fetch_host_project_hosts.py
@@ -0,0 +1,225 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+HOST_DOC = {
+ "host": "node-6.cisco.com",
+ "host_type": [],
+ "id": "node-6.cisco.com",
+ "name": "node-6.cisco.com",
+ "parent_id": "internal",
+ "parent_type": "availability_zone",
+ "services": {
+ "nova-cert": {
+ "active": True,
+ "available": True,
+ },
+ "nova-conductor": {
+ "active": True,
+ "available": True,
+ },
+ "nova-consoleauth": {
+ "active": True,
+ "available": True,
+ },
+ "nova-scheduler": {
+ "active": True,
+ "available": True,
+ }
+ },
+ "zone": "internal"
+}
+
+NONEXISTENT_TYPE = "nova"
+COMPUTE_TYPE = "Compute"
+ZONE = "internal"
+HOST_ZONE = "Test"
+
+REGION_NAME = "RegionOne"
+TEST_PROJECT_NAME = "Test"
+PROJECT_NAME = "admin"
+
+AVAILABILITY_ZONE_RESPONSE = {
+ "availabilityZoneInfo": [
+ {
+ "hosts": {
+ "node-6.cisco.com": {
+ "nova-cert": {
+ "active": True,
+ "available": True,
+ },
+ "nova-conductor": {
+ "active": True,
+ "available": True,
+ },
+ "nova-consoleauth": {
+ "active": True,
+ "available": True,
+ },
+ "nova-scheduler": {
+ "active": True,
+ "available": True,
+ }
+ }
+ },
+ "zoneName": "internal",
+ "zoneState": {
+ "available": True
+ }
+ },
+ {
+ "hosts": {
+ "node-5.cisco.com": {
+ "nova-compute": {
+ "active": True,
+ "available": True,
+ }
+ }
+ },
+ "zoneName": "osdna-zone",
+ "zoneState": {
+ "available": True
+ }
+ }
+ ]
+}
+
+AVAILABILITY_ERROR_RESPONSE = {'status': 400}
+
+HYPERVISORS_RESPONSE = {
+ "hypervisors": []
+}
+
+HYPERVISORS_ERROR_RESPONSE = {'status': 400}
+
+HOST_TO_BE_FETCHED_IP = {
+ "host": "node-5.cisco.com",
+ "id": "node-5.cisco.com"
+}
+
+IP_ADDRESS_RESPONSE = [
+ {
+ "ip_address": "192.168.0.4"
+ }
+]
+
+HOSTS_TO_BE_FETCHED_NETWORK_DETAILS = [
+ {
+ "host": "node-6.cisco.com",
+ "host_type": [
+ "Controller"
+ ],
+ "id": "node-6.cisco.com",
+ "name": "node-6.cisco.com",
+ },
+ {
+ "host": "node-5.cisco.com",
+ "host_type": [
+ "Compute"
+ ],
+ "id": "node-5.cisco.com",
+ "name": "node-5.cisco.com",
+ }
+]
+
+NETWORKS_DETAILS_RESPONSE = [
+ {
+ "configurations": "{}",
+ "host": "node-6.cisco.com"
+ },
+ {
+ "configurations": "{}",
+ "host": "node-6.cisco.com",
+ },
+ {
+ "configurations": "{}",
+ "host": "node-6.cisco.com",
+ }
+]
+
+REGION_URL = "http://192.168.0.2:8776/v2/329e0576da594c62a911d0dccb1238a7"
+AVAILABILITY_ZONE = {
+ "hosts": {
+ "node-6.cisco.com": {
+ "nova-cert": {
+ "active": True,
+ "available": True,
+ },
+ "nova-conductor": {
+ "active": True,
+ "available": True,
+ },
+ "nova-consoleauth": {
+ "active": True,
+ "available": True,
+ },
+ "nova-scheduler": {
+ "active": True,
+ "available": True,
+ }
+ }
+ },
+ "zoneName": "internal"
+}
+
+HOST_NAME = "node-6.cisco.com"
+
+GET_FOR_REGION_INFO = [
+ {
+ "config": {
+ },
+ "host": "node-6.cisco.com",
+ "host_type": [
+ "Controller",
+ "Network"
+ ],
+ "id": "node-6.cisco.com",
+ "name": "node-6.cisco.com",
+ "parent_id": "internal",
+ "parent_type": "availability_zone",
+ "services": {
+ "nova-cert": {
+ "active": True,
+ "available": True,
+ },
+ "nova-conductor": {
+ "active": True,
+ "available": True,
+ },
+ "nova-consoleauth": {
+ "active": True,
+ "available": True,
+ },
+ "nova-scheduler": {
+ "active": True,
+ "available": True,
+ }
+ },
+ "zone": "internal"
+ },
+ {
+ "host": "node-5.cisco.com",
+ "host_type": [
+ "Compute"
+ ],
+ "id": "node-5.cisco.com",
+ "ip_address": "192.168.0.4",
+ "name": "node-5.cisco.com",
+ "os_id": "1",
+ "parent_id": "osdna-zone",
+ "parent_type": "availability_zone",
+ "services": {
+ "nova-compute": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:22:42.000000"
+ }
+ },
+ "zone": "osdna-zone"
+ }
+]
diff --git a/app/test/fetch/api_fetch/test_data/api_fetch_networks.py b/app/test/fetch/api_fetch/test_data/api_fetch_networks.py
new file mode 100644
index 0000000..5079a92
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_data/api_fetch_networks.py
@@ -0,0 +1,72 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+NETWORKS_RESPONSE = {
+ "networks": [
+ {
+ "id": "8673c48a-f137-4497-b25d-08b7b218fd17",
+ "subnets": [
+ "cae3c81d-9a27-48c4-b8f6-32867ca03134"
+ ],
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40"
+ }
+ ]
+}
+
+NETWORKS_RESULT = [
+ {
+ "id": "8673c48a-f137-4497-b25d-08b7b218fd17",
+ "subnets": {
+ "test23": {
+ "cidr": "172.16.12.0/24",
+ "id": "cae3c81d-9a27-48c4-b8f6-32867ca03134",
+ "name": "test23",
+ "network_id": "0abe6331-0d74-4bbd-ad89-a5719c3793e4",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40"
+ }
+ },
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "master_parent_type": "project",
+ "master_parent_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "parent_type": "networks_folder",
+ "parent_id": "75c0eb79ff4a42b0ae4973c8375ddf40-networks",
+ "parent_text": "Networks",
+ "project": "Calipso-project",
+ "cidrs": ["172.16.12.0/24"],
+ "subnet_ids": ["cae3c81d-9a27-48c4-b8f6-32867ca03134"],
+ "network": "8673c48a-f137-4497-b25d-08b7b218fd17"
+ }
+]
+
+WRONG_NETWORK_RESPONSE = {
+}
+
+SUBNETS_RESPONSE = {
+ "subnets": [
+ {
+ "cidr": "172.16.12.0/24",
+ "id": "cae3c81d-9a27-48c4-b8f6-32867ca03134",
+ "name": "test23",
+ "network_id": "0abe6331-0d74-4bbd-ad89-a5719c3793e4",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40"
+ }
+ ]
+}
+
+ENDPOINT = "http://10.56.20.239:9696"
+WRONG_SUBNETS_RESPONSE = {}
+
+PROJECT = {
+ "description": "",
+ "enabled": True,
+ "id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "name": "Calipso-project"
+ }
+
+REGION_NAME = "RegionOne"
diff --git a/app/test/fetch/api_fetch/test_data/api_fetch_ports.py b/app/test/fetch/api_fetch/test_data/api_fetch_ports.py
new file mode 100644
index 0000000..fc0552c
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_data/api_fetch_ports.py
@@ -0,0 +1,72 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+PORTS_RESPONSE = {
+ "ports": [
+ {
+ "id": "16620a58-c48c-4195-b9c1-779a8ba2e6f8",
+ "mac_address": "fa:16:3e:d7:c5:16",
+ "name": "",
+ "network_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40"
+ }
+ ]
+}
+
+PORTS_RESULT_WITH_NET = [
+ {
+ "id": "16620a58-c48c-4195-b9c1-779a8ba2e6f8",
+ "mac_address": "fa:16:3e:d7:c5:16",
+ "name": "fa:16:3e:d7:c5:16",
+ "network_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "master_parent_type": "network",
+ "master_parent_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
+ "parent_type": "ports_folder",
+ "parent_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe-ports",
+ "parent_text": "Ports",
+ }
+]
+
+PORTS_RESULT_WITHOUT_NET = [
+ {
+ "id": "16620a58-c48c-4195-b9c1-779a8ba2e6f8",
+ "mac_address": "fa:16:3e:d7:c5:16",
+ "name": "16620a58-c48c-4195-b9c1-779a8ba2e6f8",
+ "network_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "master_parent_type": "network",
+ "master_parent_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
+ "parent_type": "ports_folder",
+ "parent_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe-ports",
+ "parent_text": "Ports",
+ }
+]
+
+PORTS_RESULT_WITH_PROJECT = [
+ {
+ "id": "16620a58-c48c-4195-b9c1-779a8ba2e6f8",
+ "mac_address": "fa:16:3e:d7:c5:16",
+ "name": "fa:16:3e:d7:c5:16",
+ "network_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "master_parent_type": "network",
+ "master_parent_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
+ "parent_type": "ports_folder",
+ "parent_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe-ports",
+ "parent_text": "Ports",
+ "project": "Calipso-project"
+ }
+]
+
+ERROR_PORTS_RESPONSE = {}
+NETWORK = {"id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe"}
+TENANT = {"id": "75c0eb79ff4a42b0ae4973c8375ddf40", "name": "Calipso-project"}
+ENDPOINT = "http://10.56.20.239:9696"
+REGION_NAME = "RegionOne"
diff --git a/app/test/fetch/api_fetch/test_data/api_fetch_projects.py b/app/test/fetch/api_fetch/test_data/api_fetch_projects.py
new file mode 100644
index 0000000..4b2c678
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_data/api_fetch_projects.py
@@ -0,0 +1,88 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+PROJECTS_CORRECT_RESPONSE = {
+ "projects": [
+ {
+ "name": "Calipso-project"
+ },
+ {
+ "name": "admin",
+ }
+ ]
+}
+
+PROJECT_RESULT = [
+ "Calipso-project",
+ "admin"
+]
+
+PROJECTS_RESPONSE_WITHOUT_PROJECTS = ""
+
+REGION_PROJECTS = [
+ {
+ "description": "",
+ "enabled": True,
+ "id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "name": "OSDNA-project"
+ },
+ {
+ "description": "admin tenant",
+ "enabled": True,
+ "id": "8c1751e0ce714736a63fee3c776164da",
+ "name": "admin"
+ }
+]
+
+USERS_PROJECTS = [
+ "OSDNA-project",
+ "admin"
+]
+
+REGION_URL_NOVER = "http://10.56.20.239:35357"
+
+REGION_RESPONSE = {
+ "tenants": [
+ {
+ "name": "Calipso-project"
+ },
+ {
+ "name": "admin"
+ },
+ {
+ "name": "services"
+ }
+ ]
+}
+
+REGION_RESULT = [
+ {
+ "name": "Calipso-project"
+ },
+ {
+ "name": "admin"
+ }
+]
+
+REGION_RESULT_WITH_NON_USER_PROJECT = [
+ {
+ "name": "Calipso-project"
+ },
+ {
+ "name": "admin"
+ },
+ {
+ "name": "non-user project"
+ }
+]
+
+REGION_ERROR_RESPONSE = []
+
+REGION_NAME = "RegionOne"
+PROJECT_ID = "admin"
diff --git a/app/test/fetch/api_fetch/test_data/api_fetch_regions.py b/app/test/fetch/api_fetch/test_data/api_fetch_regions.py
new file mode 100644
index 0000000..bd7be78
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_data/api_fetch_regions.py
@@ -0,0 +1,50 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+REGION = "RegionOne"
+ENV = "Mirantis-Liberty"
+
+AUTH_RESPONSE = {
+ "access": {
+ "serviceCatalog": [
+ {
+ "endpoints": [
+ {
+ "adminURL": "http://192.168.0.2:8774/v2/8c1751e0ce714736a63fee3c776164da",
+ "id": "274cbbd9fd6d4311b78e78dd3a1df51f",
+ "internalURL": "http://192.168.0.2:8774/v2/8c1751e0ce714736a63fee3c776164da",
+ "publicURL": "http://172.16.0.3:8774/v2/8c1751e0ce714736a63fee3c776164da",
+ "region": "RegionOne"
+ }
+ ],
+ "endpoints_links": [],
+ "name": "nova",
+ "type": "compute"
+ }
+ ]
+ }
+}
+
+REGIONS_RESULT = [
+ {
+ "id": "RegionOne",
+ "endpoints": {
+ "nova": {
+ "adminURL": "http://192.168.0.2:8774/v2/8c1751e0ce714736a63fee3c776164da",
+ "id": "274cbbd9fd6d4311b78e78dd3a1df51f",
+ "internalURL": "http://192.168.0.2:8774/v2/8c1751e0ce714736a63fee3c776164da",
+ "publicURL": "http://172.16.0.3:8774/v2/8c1751e0ce714736a63fee3c776164da",
+ "service_type": "compute"
+ }
+ },
+ "name": "RegionOne",
+ "parent_type": "regions_folder",
+ "parent_id": ENV + "-regions",
+ }
+]
diff --git a/app/test/fetch/api_fetch/test_data/configurations.py b/app/test/fetch/api_fetch/test_data/configurations.py
new file mode 100644
index 0000000..ba15346
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_data/configurations.py
@@ -0,0 +1,52 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+CONFIGURATIONS = {
+ "configuration": [
+ {
+ "mock": "True",
+ "host": "10.56.20.239",
+ "name": "mysql",
+ "password": "102QreDdiD5sKcvNf9qbHrmr",
+ "port": 3307.0,
+ "user": "root",
+ "schema": "nova"
+ },
+ {
+ "name": "OpenStack",
+ "host": "10.56.20.239",
+ "admin_token": "38MUh19YWcgQQUlk2VEFQ7Ec",
+ "port": "5000",
+ "user": "admin",
+ "pwd": "admin"
+ },
+ {
+ "host": "10.56.20.239",
+ "key": "/Users/xiaocdon/.ssh/id_rsa",
+ "name": "CLI",
+ "pwd": "",
+ "user": "root"
+ },
+ {
+ "name": "AMQP",
+ "host": "10.56.20.239",
+ "port": "5673",
+ "user": "nova",
+ "password": "NF2nSv3SisooxPkCTr8fbfOa"
+ }
+ ],
+ "distribution": "Mirantis-8.0",
+ "last_scanned:": "5/8/16",
+ "name": "Mirantis-Liberty-Xiaocong",
+ "network_plugins": [
+ "OVS"
+ ],
+ "operational": "yes",
+ "type": "environment"
+} \ No newline at end of file
diff --git a/app/test/fetch/api_fetch/test_data/regions.py b/app/test/fetch/api_fetch/test_data/regions.py
new file mode 100644
index 0000000..9945386
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_data/regions.py
@@ -0,0 +1,110 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+REGIONS = {
+ "RegionOne": {
+ "endpoints": {
+ "ceilometer": {
+ "adminURL": "http://192.168.0.2:8777",
+ "id": "617177a3dcb64560a5a79ab0a91a7225",
+ "internalURL": "http://192.168.0.2:8777",
+ "publicURL": "http://172.16.0.3:8777",
+ "service_type": "metering"
+ },
+ "cinder": {
+ "adminURL": "http://192.168.0.2:8776/v1/8c1751e0ce714736a63fee3c776164da",
+ "id": "05643f2cf9094265b432376571851841",
+ "internalURL": "http://192.168.0.2:8776/v1/8c1751e0ce714736a63fee3c776164da",
+ "publicURL": "http://172.16.0.3:8776/v1/8c1751e0ce714736a63fee3c776164da",
+ "service_type": "volume"
+ },
+ "cinderv2": {
+ "adminURL": "http://192.168.0.2:8776/v2/8c1751e0ce714736a63fee3c776164da",
+ "id": "2c30937688e944889db4a64fab6816e6",
+ "internalURL": "http://192.168.0.2:8776/v2/8c1751e0ce714736a63fee3c776164da",
+ "publicURL": "http://172.16.0.3:8776/v2/8c1751e0ce714736a63fee3c776164da",
+ "service_type": "volumev2"
+ },
+ "glance": {
+ "adminURL": "http://192.168.0.2:9292",
+ "id": "475c6c77a94e4e63a5a0f0e767f697a8",
+ "internalURL": "http://192.168.0.2:9292",
+ "publicURL": "http://172.16.0.3:9292",
+ "service_type": "image"
+ },
+ "heat": {
+ "adminURL": "http://192.168.0.2:8004/v1/8c1751e0ce714736a63fee3c776164da",
+ "id": "9e60268a5aaf422d9e42f0caab0a19b4",
+ "internalURL": "http://192.168.0.2:8004/v1/8c1751e0ce714736a63fee3c776164da",
+ "publicURL": "http://172.16.0.3:8004/v1/8c1751e0ce714736a63fee3c776164da",
+ "service_type": "orchestration"
+ },
+ "heat-cfn": {
+ "adminURL": "http://192.168.0.2:8000/v1",
+ "id": "0f04ec6ed49f4940822161bf677bdfb2",
+ "internalURL": "http://192.168.0.2:8000/v1",
+ "publicURL": "http://172.16.0.3:8000/v1",
+ "service_type": "cloudformation"
+ },
+ "keystone": {
+ "adminURL": "http://192.168.0.2:35357/v2.0",
+ "id": "404cceb349614eb39857742970408301",
+ "internalURL": "http://192.168.0.2:5000/v2.0",
+ "publicURL": "http://172.16.0.3:5000/v2.0",
+ "service_type": "identity"
+ },
+ "neutron": {
+ "adminURL": "http://192.168.0.2:9696",
+ "id": "8dc28584da224c4b9671171ead3c982a",
+ "internalURL": "http://192.168.0.2:9696",
+ "publicURL": "http://172.16.0.3:9696",
+ "service_type": "network"
+ },
+ "nova": {
+ "adminURL": "http://192.168.0.2:8774/v2/8c1751e0ce714736a63fee3c776164da",
+ "id": "274cbbd9fd6d4311b78e78dd3a1df51f",
+ "internalURL": "http://192.168.0.2:8774/v2/8c1751e0ce714736a63fee3c776164da",
+ "publicURL": "http://172.16.0.3:8774/v2/8c1751e0ce714736a63fee3c776164da",
+ "service_type": "compute"
+ },
+ "nova_ec2": {
+ "adminURL": "http://192.168.0.2:8773/services/Admin",
+ "id": "390dddc753cc4d378b489129d06c4b7d",
+ "internalURL": "http://192.168.0.2:8773/services/Cloud",
+ "publicURL": "http://172.16.0.3:8773/services/Cloud",
+ "service_type": "ec2"
+ },
+ "novav3": {
+ "adminURL": "http://192.168.0.2:8774/v3",
+ "id": "1df917160dfb4ce5b469764fde22b3ab",
+ "internalURL": "http://192.168.0.2:8774/v3",
+ "publicURL": "http://172.16.0.3:8774/v3",
+ "service_type": "computev3"
+ },
+ "swift": {
+ "adminURL": "http://192.168.0.2:8080/v1/AUTH_8c1751e0ce714736a63fee3c776164da",
+ "id": "12e78e06595f48339baebdb5d4309c70",
+ "internalURL": "http://192.168.0.2:8080/v1/AUTH_8c1751e0ce714736a63fee3c776164da",
+ "publicURL": "http://172.16.0.3:8080/v1/AUTH_8c1751e0ce714736a63fee3c776164da",
+ "service_type": "object-store"
+ },
+ "swift_s3": {
+ "adminURL": "http://192.168.0.2:8080",
+ "id": "4f655c8f2bef46a0a7ba4a20bba53666",
+ "internalURL": "http://192.168.0.2:8080",
+ "publicURL": "http://172.16.0.3:8080",
+ "service_type": "s3"
+ }
+ },
+ "id": "RegionOne",
+ "name": "RegionOne",
+ "parent_id": "Mirantis-Liberty-Xiaocong-regions",
+ "parent_type": "regions_folder"
+ }
+} \ No newline at end of file
diff --git a/app/test/fetch/api_fetch/test_data/token.py b/app/test/fetch/api_fetch/test_data/token.py
new file mode 100644
index 0000000..2abbdd8
--- /dev/null
+++ b/app/test/fetch/api_fetch/test_data/token.py
@@ -0,0 +1,23 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+TOKEN = {'tenant': {
+ 'description': 'admin tenant',
+ 'name': 'admin',
+ 'is_domain': False,
+ 'id': '8c1751e0ce714736a63fee3c776164da',
+ 'enabled': True,
+ 'parent_id': None
+ },
+ 'issued_at': '2016-10-19T23:06:29.000000Z',
+ 'expires': '2016-10-20T00:06:28.615780Z',
+ 'id': 'gAAAAABYB_x10x_6AlA2Y5RJZ6HCcCDSXe0f8vfisKnOM_XCDZvwl2qiwzCQIOYX9mCmRyGojZ2JEjIb0vHL0f0hxqSq84g5jbZpN0h0Un_RkTZXSKf0K1uigbr3q__ilhctLvwWNem6XQSGrav1fQrec_DjdvUxSwuoBSSo82kKQ7SvPSdVwrA',
+ 'token_expiry_time': 1476921988,
+ 'audit_ids': ['2Ps0lRlHRIG80FWamMkwWg']
+} \ No newline at end of file
diff --git a/app/test/fetch/cli_fetch/__init__.py b/app/test/fetch/cli_fetch/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/test/fetch/cli_fetch/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/test/fetch/cli_fetch/test_cli_access.py b/app/test/fetch/cli_fetch/test_cli_access.py
new file mode 100644
index 0000000..f393538
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_cli_access.py
@@ -0,0 +1,159 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import time
+
+from discover.fetchers.cli.cli_access import CliAccess
+from test.fetch.cli_fetch.test_data.cli_access import *
+from test.fetch.test_fetch import TestFetch
+from unittest.mock import MagicMock, patch
+from utils.ssh_conn import SshConn
+
+
+class TestCliAccess(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.cli_access = CliAccess()
+
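+ # helper: runs CliAccess.run with a prepared cache entry and a mocked SSH exec result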
+ @patch("utils.ssh_conn.SshConn.exec")
+ def check_run_result(self, is_gateway_host,
+ enable_cache,
+ cached_command_result, exec_result,
+ expected_result, err_msg,
+ ssh_con_exec):
+ # mock cached commands
+ if not is_gateway_host:
+ self.cli_access.cached_commands = {
+ NON_GATEWAY_CACHED_COMMAND: cached_command_result
+ }
+ else:
+ self.cli_access.cached_commands = {
+ GATEWAY_CACHED_COMMAND: cached_command_result
+ }
+ original_is_gateway_host = SshConn.is_gateway_host
+ SshConn.is_gateway_host = MagicMock(return_value=is_gateway_host)
+ ssh_con_exec.return_value = exec_result
+ result = self.cli_access.run(COMMAND, COMPUTE_HOST_ID,
+ on_gateway=False, enable_cache=enable_cache)
+ self.assertEqual(result, expected_result, err_msg)
+
+ # reset the cached commands after testing
+ self.cli_access.cached_commands = {}
+ # reset method
+ SshConn.is_gateway_host = original_is_gateway_host
+
+ def test_run(self):
+ curr_time = time.time()
+ test_cases = [
+ {
+ "is_gateway_host": True,
+ "enable_cache": False,
+ "cached_command_result": None,
+ "exec_result": RUN_RESULT,
+ "expected_result": RUN_RESULT,
+ "err_msg": "Can't get the " +
+ "result of the command"
+ },
+ {
+ "is_gateway_host": True,
+ "enable_cache": True,
+ "cached_command_result": {
+ "timestamp": curr_time,
+ "result": CACHED_COMMAND_RESULT
+ },
+ "exec_result": None,
+ "expected_result": CACHED_COMMAND_RESULT,
+ "err_msg": "Can't get the cached " +
+ "result of the command " +
+ "when the host is a gateway host"
+ },
+ {
+ "is_gateway_host": False,
+ "enable_cache": True,
+ "cached_command_result": {
+ "timestamp": curr_time,
+ "result": CACHED_COMMAND_RESULT
+ },
+ "exec_result": None,
+ "expected_result": CACHED_COMMAND_RESULT,
+ "err_msg": "Can't get the cached " +
+ "result of the command " +
+ "when the host is not a gateway host"
+ },
+ {
+ "is_gateway_host": True,
+ "enable_cache": True,
+ "cached_command_result": {
+ "timestamp": curr_time - self.cli_access.cache_lifetime,
+ "result": CACHED_COMMAND_RESULT
+ },
+ "exec_result": RUN_RESULT,
+ "expected_result": RUN_RESULT,
+ "err_msg": "Can't get the result " +
+ "of the command when the cached result expired " +
+ "and the host is a gateway host"
+ },
+ {
+ "is_gateway_host": False,
+ "enable_cache": True,
+ "cached_command_result": {
+ "timestamp": curr_time - self.cli_access.cache_lifetime,
+ "result": CACHED_COMMAND_RESULT
+ },
+ "exec_result": RUN_RESULT,
+ "expected_result": RUN_RESULT,
+ "err_msg": "Can't get the result " +
+ "of the command when the cached result expired " +
+ "and the host is a not gateway host"
+ }
+ ]
+
+ for test_case in test_cases:
+ self.check_run_result(test_case["is_gateway_host"],
+ test_case["enable_cache"],
+ test_case["cached_command_result"],
+ test_case["exec_result"],
+ test_case["expected_result"],
+ test_case["err_msg"])
+
+ def test_run_fetch_lines(self):
+ original_run = self.cli_access.run
+ self.cli_access.run = MagicMock(return_value=RUN_RESULT)
+
+ result = self.cli_access.run_fetch_lines(COMMAND, COMPUTE_HOST_ID)
+
+ self.assertEqual(result, FETCH_LINES_RESULT,
+ "Can't get correct result of the command line")
+ self.cli_access.run = original_run
+
+ def test_run_fetch_lines_with_empty_command_result(self):
+ original_run = self.cli_access.run
+ self.cli_access.run = MagicMock(return_value="")
+
+ result = self.cli_access.run_fetch_lines(COMMAND, COMPUTE_HOST_ID)
+ self.assertEqual(result, [], "Can't get [] when the command " +
+ "result is empty")
+ self.cli_access.run = original_run
+
+ def test_merge_ws_spillover_lines(self):
+ fixed_lines = self.cli_access.merge_ws_spillover_lines(LINES_FOR_FIX)
+ self.assertEqual(fixed_lines, FIXED_LINES, "Can't merge the " +
+ "ws-separated spillover lines")
+
+ def test_parse_line_with_ws(self):
+ parse_line = self.cli_access.parse_line_with_ws(LINE_FOR_PARSE, HEADERS)
+ self.assertEqual(parse_line, PARSED_LINE, "Can't parse the line with ws")
+
+ def test_parse_cmd_result_with_whitespace(self):
+ result = self.cli_access.parse_cmd_result_with_whitespace(FIXED_LINES,
+ HEADERS,
+ remove_first=False)
+ self.assertEqual(result, PARSED_CMD_RESULT,
+ "Can't parse the cmd result with whitespace")
diff --git a/app/test/fetch/cli_fetch/test_cli_fetch_host_pnics.py b/app/test/fetch/cli_fetch/test_cli_fetch_host_pnics.py
new file mode 100644
index 0000000..f5f327e
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_cli_fetch_host_pnics.py
@@ -0,0 +1,135 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_host_pnics import CliFetchHostPnics
+from test.fetch.cli_fetch.test_data.cli_fetch_host_pnics import *
+from test.fetch.test_fetch import TestFetch
+from unittest.mock import MagicMock
+from unittest.mock import call
+
+
+class TestCliFetchHostPnics(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = CliFetchHostPnics()
+ self.fetcher.set_env(self.env)
+
+ def check_get_result(self, host,
+ interface_lines, interface_names,
+ interface_details, expected_result,
+ err_msg):
+ original_get_by_id = self.fetcher.inv.get_by_id
+ original_run_fetch_lines = self.fetcher.run_fetch_lines
+ original_find_interface_details = self.fetcher.find_interface_details
+
+ self.fetcher.inv.get_by_id = MagicMock(return_value=host)
+ self.fetcher.run_fetch_lines = MagicMock(return_value=interface_lines)
+ self.fetcher.find_interface_details = MagicMock(
+ side_effect=interface_details)
+ result = self.fetcher.get(PNICS_FOLDER_ID)
+ self.assertEqual(result, expected_result, err_msg)
+
+ if interface_names:
+ interface_calls = [call(HOST_ID, interface_name) for
+ interface_name in interface_names]
+ self.fetcher.find_interface_details.assert_has_calls(interface_calls,
+ any_order=True)
+ # reset the methods
+ self.fetcher.inv.get_by_id = original_get_by_id
+ self.fetcher.run_fetch_lines = original_run_fetch_lines
+ self.fetcher.find_interface_details = original_find_interface_details
+
+ def test_get(self):
+ test_cases = [
+ {
+ "host": NETWORK_NODE,
+ "interface_lines": INTERFACE_LINES,
+ "interface_names": INTERFACE_NAMES,
+ "interface_details": [INTERFACE, None],
+ "expected_results": INTERFACES_GET_RESULTS,
+ "err_msg": "Can't get interfaces"
+ },
+ {
+ "host": [],
+ "interface_lines": None,
+ "interface_names": None,
+ "interface_details": None,
+ "expected_results": [],
+ "err_msg": "Can't get [] when the host " +
+ "doesn't exist in the database"
+ },
+ {
+ "host": WRONG_NODE,
+ "interface_lines": None,
+ "interface_names": None,
+ "interface_details": None,
+ "expected_results": [],
+ "err_msg": "Can't get [] when the host doesn't " +
+ "have required host type"
+ },
+ {
+ "host": NETWORK_NODE,
+ "interface_lines": [],
+ "interface_names": None,
+ "interface_details":None,
+ "expected_results": [],
+ "err_msg": "Can't get [] when " +
+ "the interface lines is []"
+ }
+ ]
+ for test_case in test_cases:
+ self.check_get_result(test_case["host"],
+ test_case["interface_lines"],
+ test_case["interface_names"],
+ test_case["interface_details"],
+ test_case["expected_results"],
+ test_case["err_msg"])
+
+ def test_find_interface_details(self):
+ original_run_fetch_lines = self.fetcher.run_fetch_lines
+ original_handle_line = self.fetcher.handle_line
+ original_set_interface_data = self.fetcher.set_interface_data
+
+ self.fetcher.run_fetch_lines = MagicMock(return_value=IFCONFIG_CM_RESULT)
+ self.fetcher.handle_line = MagicMock()
+ self.fetcher.set_interface_data = MagicMock()
+
+ result = self.fetcher.find_interface_details(HOST_ID, INTERFACE_NAME)
+
+ self.fetcher.run_fetch_lines = original_run_fetch_lines
+ self.fetcher.handle_line = original_handle_line
+ self.fetcher.set_interface_data = original_set_interface_data
+
+ self.assertEqual(result, INTERFACE_DETAILS, "Can't get interface details")
+
+ def test_handle_mac_address_line(self):
+ self.fetcher.handle_line(RAW_INTERFACE, MAC_ADDRESS_LINE)
+ self.assertEqual(RAW_INTERFACE["mac_address"], MAC_ADDRESS,
+ "Can't get the correct mac address")
+
+ # Known defect: the handler currently returns "addr:" instead of the expected fe80::f816:3eff:fea1:eb73/64
+ def test_handle_ipv6_address_line(self):
+ self.fetcher.handle_line(RAW_INTERFACE, IPV6_ADDRESS_LINE)
+ self.assertEqual(RAW_INTERFACE['IPv6 Address'], IPV6_ADDRESS,
+ "Can' get the correct ipv6 address")
+
+ def test_handle_ipv4_address_line(self):
+ self.fetcher.handle_line(RAW_INTERFACE, IPV4_ADDRESS_LINE)
+ self.assertEqual(RAW_INTERFACE['IP Address'], IPV4_ADDRESS,
+ "Can't get the correct ipv4 address")
+
+ def test_set_interface_data(self):
+ original_run_fetch_lines = self.fetcher.run_fetch_lines
+ self.fetcher.run_fetch_lines = MagicMock(return_value=ETHTOOL_RESULT)
+ self.fetcher.set_interface_data(INTERFACE_FOR_SET)
+ self.assertEqual(INTERFACE_FOR_SET, INTERFACE_AFTER_SET, "Can't get the attributes of the "
+ "interface from the CMD result")
+
+ self.fetcher.run_fetch_lines = original_run_fetch_lines
diff --git a/app/test/fetch/cli_fetch/test_cli_fetch_host_pnics_vpp.py b/app/test/fetch/cli_fetch/test_cli_fetch_host_pnics_vpp.py
new file mode 100644
index 0000000..805e36d
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_cli_fetch_host_pnics_vpp.py
@@ -0,0 +1,34 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_host_pnics_vpp import CliFetchHostPnicsVpp
+from test.fetch.test_fetch import TestFetch
+from unittest.mock import MagicMock
+from test.fetch.cli_fetch.test_data.cli_fetch_host_pnics_vpp import *
+
+
+class TestCliFetchHostPnicsVpp(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = CliFetchHostPnicsVpp()
+ self.fetcher.set_env(self.env)
+
+ def test_get(self):
+ # store original method
+ original_find_items = self.fetcher.inv.find_items
+
+ # mock the method
+ self.fetcher.inv.find_items = MagicMock(return_value=VEDGES)
+
+ result = self.fetcher.get(ID)
+ # reset the method
+ self.fetcher.inv.find_items = original_find_items
+
+ self.assertNotEqual(result, [], "Can't get the pnics info") \ No newline at end of file
diff --git a/app/test/fetch/cli_fetch/test_cli_fetch_host_vservices.py b/app/test/fetch/cli_fetch/test_cli_fetch_host_vservices.py
new file mode 100644
index 0000000..c33faca
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_cli_fetch_host_vservices.py
@@ -0,0 +1,132 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_host_vservices import CliFetchHostVservices
+from test.fetch.test_fetch import TestFetch
+from test.fetch.cli_fetch.test_data.cli_fetch_host_verservices import *
+from unittest.mock import MagicMock
+
+
+class TestCliFetchHostVservices(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = CliFetchHostVservices()
+ self.fetcher.set_env(self.env)
+
+ def test_get(self):
+ # store original get_single method
+ original_get_single = self.fetcher.inv.get_single
+ # mock the host data
+ self.fetcher.inv.get_single = MagicMock(return_value=NETWORK_HOST)
+ # store original run_fetch_lines method
+ original_run_fetch_lines = self.fetcher.run_fetch_lines
+ # mock command line results
+ self.fetcher.run_fetch_lines = MagicMock(return_value=NAMESPACES)
+
+ # only test the logic on get method, mock the set_details method
+ original_set_details = self.fetcher.set_details
+ self.fetcher.set_details = MagicMock()
+
+ result = self.fetcher.get(NETWORK_HOST['id'])
+ # reset methods
+ self.fetcher.run_fetch_lines = original_run_fetch_lines
+ self.fetcher.set_details = original_set_details
+ self.fetcher.inv.get_single = original_get_single
+
+ self.assertNotEqual(result, [], "Can't get verservices")
+
+ def test_get_with_wrong_host_type(self):
+ # store original get_single method
+ original_get_single = self.fetcher.inv.get_single
+ # mock the host data
+ self.fetcher.inv.get_single = MagicMock(return_value=COMPUTE_HOST)
+ result = self.fetcher.get(COMPUTE_HOST['id'])
+
+ self.fetcher.inv.get_single = original_get_single
+
+ self.assertEqual(result, [], "Can't get empty array when the host_type doesn't contain Network")
+
+ def test_set_details(self):
+ # store original methods
+ original_get_router_name = self.fetcher.get_router_name
+ original_get_network_name = self.fetcher.get_network_name
+ original_get_type = self.fetcher.agents_list.get_type
+
+ # mock methods
+ self.fetcher.get_network_name = MagicMock(return_value=ROUTER[0]['name'])
+ self.fetcher.get_router_name = MagicMock(return_value=ROUTER[0]['name'])
+ self.fetcher.agents_list.get_type = MagicMock(return_value=AGENT)
+
+ self.fetcher.set_details(NETWORK_HOST['id'], LOCAL_SERVICES_IDS[0])
+
+ # reset methods
+ self.fetcher.get_network_name = original_get_network_name
+ self.fetcher.get_router_name = original_get_router_name
+ self.fetcher.agents_list.get_type = original_get_type
+
+ self.assertIn("name", LOCAL_SERVICES_IDS[0], "Can't add name")
+ self.assertIn("parent_id", LOCAL_SERVICES_IDS[0], "Can't add parent id")
+
+ def test_get_network_name(self):
+ # store original method
+ original_get_objects_list_for_id = self.fetcher.get_objects_list_for_id
+ # mock the result
+ self.fetcher.get_objects_list_for_id = MagicMock(return_value=ROUTER)
+
+ name = self.fetcher.get_network_name(ID_CLEAN)
+
+ self.fetcher.get_objects_list_for_id = original_get_objects_list_for_id
+ self.assertEqual(name, ROUTER[0]['name'], "Can't get network name")
+
+ def test_get_network_without_router(self):
+ # store original method
+ original_get_objects_list_for_id = self.fetcher.get_objects_list_for_id
+ # mock the result
+ self.fetcher.get_objects_list_for_id = MagicMock(return_value=[])
+
+ name = self.fetcher.get_network_name(ID_CLEAN)
+
+ self.fetcher.get_objects_list_for_id = original_get_objects_list_for_id
+ self.assertEqual(name, ID_CLEAN, "Can't use the id as the name when the network info from the database is empty")
+
+ def test_get_router_name(self):
+ # store original method
+ original_get_objects_list_for_id = self.fetcher.get_objects_list_for_id
+ # mock the result
+ self.fetcher.get_objects_list_for_id = MagicMock(return_value=ROUTER)
+
+ name = self.fetcher.get_router_name(LOCAL_SERVICES_IDS[0], ID_CLEAN)
+
+ self.fetcher.get_objects_list_for_id = original_get_objects_list_for_id
+
+ self.assertIn("name", LOCAL_SERVICES_IDS[0], "Can't get network name")
+ self.assertEqual(name, ROUTER[0]['name'], "Can't get router name")
+
+ def test_set_agent_type(self):
+ # store original get_type method
+ original_get_type = self.fetcher.agents_list.get_type
+ self.fetcher.agents_list.get_type = MagicMock(return_value=AGENT)
+
+ self.fetcher.set_agent_type(VSERVICE)
+ # reset method
+ self.fetcher.agents_list.get_type = original_get_type
+ self.assertIn("parent_id", VSERVICE, "Can't add parent id to vservice document")
+
+ def test_set_agent_type_without_agent(self):
+ # store original get_type method
+ original_get_type = self.fetcher.agents_list.get_type
+ self.fetcher.agents_list.get_type = MagicMock(return_value={})
+
+ self.fetcher.set_agent_type(VSERVICE)
+ # reset method
+ self.fetcher.agents_list.get_type = original_get_type
+ self.assertIn("parent_id", VSERVICE, "Can't add parent id to vservice document")
+ self.assertEqual(VSERVICE['parent_type'], "vservice_miscellenaous_folder",
+ "Can't add document to miscellenaous folder when it doesn't have agent") \ No newline at end of file
diff --git a/app/test/fetch/cli_fetch/test_cli_fetch_instance_vnics.py b/app/test/fetch/cli_fetch/test_cli_fetch_instance_vnics.py
new file mode 100644
index 0000000..5a57b9c
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_cli_fetch_instance_vnics.py
@@ -0,0 +1,111 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_instance_vnics import CliFetchInstanceVnics
+from test.fetch.test_fetch import TestFetch
+from test.fetch.cli_fetch.test_data.cli_fetch_instance_vnics import *
+from unittest.mock import MagicMock
+
+
+class TestCliFetchInstanceVnics(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = CliFetchInstanceVnics()
+ self.fetcher.set_env(self.env)
+
+ def test_get(self):
+ # store original methods
+ original_get_by_id = self.fetcher.inv.get_by_id
+ original_run_fetch_lines = self.fetcher.run_fetch_lines
+ original_get_vnics_from_dumpxml = self.fetcher.get_vnics_from_dumpxml
+
+ # mock methods
+ self.fetcher.inv.get_by_id = MagicMock(side_effect=[INSATNCE, COMPUTE_HOST])
+ self.fetcher.run_fetch_lines = MagicMock(return_value=INSTANCES_LIST)
+ self.fetcher.get_vnics_from_dumpxml = MagicMock(return_value=VNICS_FROM_DUMP_XML)
+
+ result = self.fetcher.get(VNICS_FOLDER['id'])
+
+ # reset methods
+ self.fetcher.inv.get_by_id = original_get_by_id
+ self.fetcher.run_fetch_lines = original_run_fetch_lines
+ self.fetcher.get_vnics_from_dumpxml = original_get_vnics_from_dumpxml
+
+ self.assertNotEqual(result, [], "Can't get vnics with VNICS folder id")
+
+ def test_get_without_instance(self):
+ # store original methods
+ original_get_by_id = self.fetcher.inv.get_by_id
+
+ # mock methods
+ self.fetcher.inv.get_by_id = MagicMock(return_value=[])
+
+ result = self.fetcher.get(VNICS_FOLDER['id'])
+
+ # reset methods
+ self.fetcher.inv.get_by_id = original_get_by_id
+
+ self.assertEqual(result, [], "Can't get empty array when the instance can't be found")
+
+ def test_get_without_host(self):
+ # store original methods
+ original_get_by_id = self.fetcher.inv.get_by_id
+
+ # mock methods
+ self.fetcher.inv.get_by_id = MagicMock(side_effect=[INSATNCE, NETWORK_HOST])
+
+ result = self.fetcher.get(VNICS_FOLDER['id'])
+
+ # reset methods
+ self.fetcher.inv.get_by_id = original_get_by_id
+
+ self.assertEqual(result, [], "Can't get empty array when the host doesn't contain network host type")
+
+ def test_get_vnics_from_dumpxml(self):
+ # store original functions
+ original_run = self.fetcher.run
+ original_set_vnic_properties = self.fetcher.set_vnic_properties
+
+ # mock the functions
+ self.fetcher.run = MagicMock(return_value=DUMPXML)
+ self.fetcher.set_vnic_properties = MagicMock()
+
+ vnics = self.fetcher.get_vnics_from_dumpxml(ID, INSATNCE)
+ # reset functions
+ self.fetcher.run = original_run
+ self.fetcher.set_vnic_properties = original_set_vnic_properties
+
+ self.assertNotEqual(vnics, [], "Can't get vnics")
+
+ def test_get_vnics_from_dumpxml_with_empty_command_result(self):
+ # store original functions
+ original_run = self.fetcher.run
+
+ # mock the functions
+ self.fetcher.run = MagicMock(return_value=" ")
+
+ vnics = self.fetcher.get_vnics_from_dumpxml(ID, INSATNCE)
+ # reset functions
+ self.fetcher.run = original_run
+
+ self.assertEqual(vnics, [], "Can't get empty array when the dumpxml is empty")
+
+ def test_get_vnics_from_dumpxml_with_wrong_instance(self):
+ # store original functions
+ original_run = self.fetcher.run
+
+ # mock the functions
+ self.fetcher.run = MagicMock(return_value=WRONG_DUMPXML)
+
+ vnics = self.fetcher.get_vnics_from_dumpxml(ID, INSATNCE)
+ # reset functions
+ self.fetcher.run = original_run
+
+ self.assertEqual(vnics, [], "Can't get empty array when the instance is wrong") \ No newline at end of file
diff --git a/app/test/fetch/cli_fetch/test_cli_fetch_instance_vnics_ovs.py b/app/test/fetch/cli_fetch/test_cli_fetch_instance_vnics_ovs.py
new file mode 100644
index 0000000..24a1b5d
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_cli_fetch_instance_vnics_ovs.py
@@ -0,0 +1,36 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_instance_vnics_ovs import CliFetchInstanceVnicsOvs
+from test.fetch.test_fetch import TestFetch
+from test.fetch.cli_fetch.test_data.cli_fetch_instance_vnics import *
+from unittest.mock import MagicMock
+
+
+class TestCliFetchInstanceVnicsOvs(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = CliFetchInstanceVnicsOvs()
+ self.fetcher.set_env(self.env)
+
+ def test_set_vnic_properties(self):
+ # store original method
+ original_set = self.fetcher.inv.set
+ self.fetcher.inv.set = MagicMock()
+
+ self.fetcher.set_vnic_properties(VNIC, INSATNCE)
+ # reset method
+ self.fetcher.inv.set = original_set
+
+ self.assertIn("source_bridge", VNIC, "Can't set source_bridge for ovs vnic")
+
+ def test_get_vnic_name(self):
+ name = self.fetcher.get_vnic_name(VNIC, INSATNCE)
+ self.assertNotEqual(name, None, "Can't get vnic name")
\ No newline at end of file
diff --git a/app/test/fetch/cli_fetch/test_cli_fetch_instance_vnics_vpp.py b/app/test/fetch/cli_fetch/test_cli_fetch_instance_vnics_vpp.py
new file mode 100644
index 0000000..46c25fb
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_cli_fetch_instance_vnics_vpp.py
@@ -0,0 +1,23 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_instance_vnics_vpp import CliFetchInstanceVnicsVpp
+from test.fetch.cli_fetch.test_data.cli_fetch_instance_vnics import *
+from test.fetch.test_fetch import TestFetch
+
+
+class TestCliFetchInstanceVnicsVpp(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = CliFetchInstanceVnicsVpp()
+
+ def test_get_name(self):
+ name = self.fetcher.get_vnic_name(VNIC, INSATNCE)
+ self.assertNotEqual(name, None, "Can't get vnic name")
\ No newline at end of file
diff --git a/app/test/fetch/cli_fetch/test_cli_fetch_vconnectors.py b/app/test/fetch/cli_fetch/test_cli_fetch_vconnectors.py
new file mode 100644
index 0000000..23e0a99
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_cli_fetch_vconnectors.py
@@ -0,0 +1,66 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_vconnectors import CliFetchVconnectors
+from test.fetch.test_fetch import TestFetch
+from test.fetch.cli_fetch.test_data.cli_fetch_vconnectors import *
+from unittest.mock import MagicMock
+
+
+class TestCliFetchVconnectors(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = CliFetchVconnectors()
+ self.fetcher.set_env(self.env)
+
+ def test_get(self):
+ # store original methods
+ original_get_by_id = self.fetcher.inv.get_by_id
+ original_get_vconnectors = self.fetcher.get_vconnectors
+
+ # mock methods
+ self.fetcher.inv.get_by_id = MagicMock(return_value=HOST)
+ self.fetcher.get_vconnectors = MagicMock(return_value=VCONNECTORS)
+
+ result = self.fetcher.get(VCONNECTORS_FOLDER['id'])
+
+ # reset methods
+ self.fetcher.inv.get_by_id = original_get_by_id
+ self.fetcher.get_vconnectors = original_get_vconnectors
+
+ self.assertEqual(result, VCONNECTORS, "Can't get the vconnectors")
+
+ def test_get_without_host(self):
+ # store original methods
+ original_get_by_id = self.fetcher.inv.get_by_id
+
+ # mock methods
+ self.fetcher.inv.get_by_id = MagicMock(return_value=[])
+
+ result = self.fetcher.get(VCONNECTORS_FOLDER['id'])
+
+ # reset methods
+ self.fetcher.inv.get_by_id = original_get_by_id
+
+ self.assertEqual(result, [], "Can't get empty array when the host doesn't exist")
+
+ def test_get_with_wrong_host(self):
+ # store original methods
+ original_get_by_id = self.fetcher.inv.get_by_id
+
+ # mock methods
+ self.fetcher.inv.get_by_id = MagicMock(return_value=WRONG_HOST)
+
+ result = self.fetcher.get(VCONNECTORS_FOLDER['id'])
+
+ # reset methods
+ self.fetcher.inv.get_by_id = original_get_by_id
+
+ self.assertEqual(result, [], "Can't get empty array when the host doesn't contain host type") \ No newline at end of file
diff --git a/app/test/fetch/cli_fetch/test_cli_fetch_vconnectors_ovs.py b/app/test/fetch/cli_fetch/test_cli_fetch_vconnectors_ovs.py
new file mode 100644
index 0000000..cc882a1
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_cli_fetch_vconnectors_ovs.py
@@ -0,0 +1,38 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_vconnectors_ovs import CliFetchVconnectorsOvs
+from test.fetch.test_fetch import TestFetch
+from test.fetch.cli_fetch.test_data.cli_fetch_vconnectors_ovs import *
+from unittest.mock import MagicMock
+
+
+class TestCliFetchVconnectorsOvs(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = CliFetchVconnectorsOvs()
+ self.fetcher.set_env(self.env)
+
+ def test_get_vconnectors(self):
+ # store the original methods
+ original_run_fetch_lines = self.fetcher.run_fetch_lines
+ original_find_items = self.fetcher.inv.find_items
+
+ # mock the methods
+ self.fetcher.run_fetch_lines = MagicMock(return_value=BRIDGE_RESULT)
+ self.fetcher.inv.find_items = MagicMock(return_value=[])
+
+ result = self.fetcher.get_vconnectors(NETWORK_NODE)
+
+ # reset methods
+ self.fetcher.run_fetch_lines = original_run_fetch_lines
+ self.fetcher.inv.find_items = original_find_items
+
+ self.assertNotEqual(result, [], "Can't get vconnectors with the host id")
diff --git a/app/test/fetch/cli_fetch/test_cli_fetch_vconnectors_vpp.py b/app/test/fetch/cli_fetch/test_cli_fetch_vconnectors_vpp.py
new file mode 100644
index 0000000..f729c2c
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_cli_fetch_vconnectors_vpp.py
@@ -0,0 +1,50 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_vconnectors_vpp import CliFetchVconnectorsVpp
+from test.fetch.test_fetch import TestFetch
+from unittest.mock import MagicMock
+from test.fetch.cli_fetch.test_data.cli_fetch_vconnectors_vpp import *
+
+
+class TestCliFetchVconnectorsVpp(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = CliFetchVconnectorsVpp()
+ self.fetcher.set_env(self.env)
+
+ def test_get_vconnectors(self):
+ # store original method
+ original_run_fetch_lines = self.fetcher.run_fetch_lines
+ original_get_interface_details = self.fetcher.get_interface_details
+
+ # mock methods
+ self.fetcher.run_fetch_lines = MagicMock(return_value=MODE_RESULT)
+ self.fetcher.get_interface_details = MagicMock(return_value=None)
+
+ result = self.fetcher.get_vconnectors(HOST)
+
+ # reset methods
+ self.fetcher.run_fetch_lines = original_run_fetch_lines
+ self.fetcher.get_interface_details = original_get_interface_details
+
+ self.assertNotEqual(result, {}, "Can't get vconnectors info")
+
+ def test_get_interface_details(self):
+ # store original methods
+ original_run_fetch_lines = self.fetcher.run_fetch_lines
+
+ # mock method
+ self.fetcher.run_fetch_lines = MagicMock(return_value=INTERFACE_LINES)
+
+ result = self.fetcher.get_interface_details(HOST, INTERFACE_NAME)
+ # restore method
+ self.fetcher.run_fetch_lines = original_run_fetch_lines
+ self.assertNotEqual(result, None, "Can't get the interface details")
\ No newline at end of file
diff --git a/app/test/fetch/cli_fetch/test_cli_fetch_vservice_vnics.py b/app/test/fetch/cli_fetch/test_cli_fetch_vservice_vnics.py
new file mode 100644
index 0000000..b77f41e
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_cli_fetch_vservice_vnics.py
@@ -0,0 +1,124 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_vservice_vnics import CliFetchVserviceVnics
+from test.fetch.test_fetch import TestFetch
+from test.fetch.cli_fetch.test_data.cli_fetch_vservice_vnics import *
+from unittest.mock import MagicMock
+
+
+class TestCliFetchVserviceVnics(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = CliFetchVserviceVnics()
+ self.fetcher.set_env(self.env)
+
+ def test_get(self):
+ # store original methods
+ original_get_by_id = self.fetcher.inv.get_by_id
+ original_run_fetch_lines = self.fetcher.run_fetch_lines
+ original_handle_service = self.fetcher.handle_service
+ # mock methods
+ self.fetcher.inv.get_by_id = MagicMock(return_value=NETWORK_NODE)
+ self.fetcher.run_fetch_lines = MagicMock(return_value=NAME_SPACES)
+ self.fetcher.handle_service = MagicMock(return_value=SERVICES)
+
+ result = self.fetcher.get(NETWORK_NODE['id'])
+
+ # reset methods
+ self.fetcher.inv.get_by_id = original_get_by_id
+ self.fetcher.run_fetch_lines = original_run_fetch_lines
+ self.fetcher.handle_service = original_handle_service
+
+ self.assertNotEqual(result, [], "Can't get vnics")
+
+ def test_get_with_error_host(self):
+ # store original methods
+ original_get_by_id = self.fetcher.inv.get_by_id
+
+ # mock methods
+ self.fetcher.inv.get_by_id = MagicMock(return_value=ERROR_NODE)
+
+ result = self.fetcher.get(NETWORK_NODE['id'])
+
+ # reset methods
+ self.fetcher.inv.get_by_id = original_get_by_id
+
+ self.assertEqual(result, [], "Can't get empty array when the host doesn't contain host_type")
+
+ def test_get_with_compute_host(self):
+ # store original methods
+ original_get_by_id = self.fetcher.inv.get_by_id
+
+ # mock methods
+ self.fetcher.inv.get_by_id = MagicMock(return_value=COMPUTE_NODE)
+
+ result = self.fetcher.get(NETWORK_NODE['id'])
+
+ # reset methods
+ self.fetcher.inv.get_by_id = original_get_by_id
+
+ self.assertEqual(result, [], "Can't get empty array when the host type doesn't contain network")
+
+ def test_handle_service(self):
+ # store original method
+ original_run_fetch_lines = self.fetcher.run_fetch_lines
+ original_set_interface_data = self.fetcher.set_interface_data
+ # mock the method
+ self.fetcher.run_fetch_lines = MagicMock(return_value=IFCONFIG_RESULT)
+ self.fetcher.set_interface_data = MagicMock()
+ result = self.fetcher.handle_service(NETWORK_NODE['id'], SERVICE_ID)
+ # reset method
+ self.fetcher.run_fetch_lines = original_run_fetch_lines
+ self.fetcher.set_interface_data = original_set_interface_data
+
+ self.assertNotEqual(result, [], "Can't get interfaces data")
+
+ def test_set_interface_data(self):
+ # store original methods
+ original_get_by_field = self.fetcher.inv.get_by_field
+ original_get_by_id = self.fetcher.inv.get_by_id
+ original_set = self.fetcher.inv.set
+
+ # mock the methods
+ self.fetcher.inv.get_by_field = MagicMock(return_value=NETWORK)
+ self.fetcher.inv.get_by_id = MagicMock(return_value=VSERVICE)
+ self.fetcher.inv.set = MagicMock()
+
+ self.fetcher.set_interface_data(VNIC)
+
+ # reset methods
+ self.fetcher.inv.get_by_field = original_get_by_field
+ self.fetcher.inv.get_by_id = original_get_by_id
+ self.fetcher.inv.set = original_set
+
+ self.assertIn("data", VNIC, "Can't set data")
+ self.assertIn("cidr", VNIC, "Can't set cidr")
+ self.assertIn("network", VNIC, "Can't set network")
+
+ def test_handle_mac_address_line(self):
+ self.fetcher.handle_line(RAW_VNIC, MAC_ADDRESS_LINE)
+ self.assertEqual(RAW_VNIC['mac_address'], MAC_ADDRESS, "Can't get the correct mac address from the line")
+
+ def test_handle_ipv4_address_line(self):
+ self.fetcher.handle_line(RAW_VNIC, IPV4_ADDRESS_LINE)
+ self.assertEqual(RAW_VNIC['IP Address'], IPV4_ADDRESS, "Can't get the correct ipv4 address from the line")
+
+ def test_handle_ipv6_address_line(self):
+ self.fetcher.handle_line(RAW_VNIC, IPV6_ADDRESS_LINE)
+ self.assertEqual(RAW_VNIC['IPv6 Address'], IPV6_ADDRESS, "Can't get the correct ipv6 address from the line")
+
+ def test_get_net_size(self):
+ size = self.fetcher.get_net_size(NET_MASK_ARRAY)
+ self.assertEqual(size, SIZE, "Can't get the size of network by netmask")
+
+ def test_get_cidr_for_vnic(self):
+ cidr = self.fetcher.get_cidr_for_vnic(VNIC)
+ self.assertEqual(cidr, CIDR, "the cidr info is wrong")
diff --git a/app/test/fetch/cli_fetch/test_data/__init__.py b/app/test/fetch/cli_fetch/test_data/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_data/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/test/fetch/cli_fetch/test_data/cli_access.py b/app/test/fetch/cli_fetch/test_data/cli_access.py
new file mode 100644
index 0000000..b151dc6
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_data/cli_access.py
@@ -0,0 +1,58 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+COMPUTE_HOST_ID = "node-5.cisco.com"
+COMMAND = "virsh list"
+NON_GATEWAY_CACHED_COMMAND = COMPUTE_HOST_ID + "," + "ssh -o StrictHostKeyChecking=no " + \
+ COMPUTE_HOST_ID + " sudo " + COMMAND
+GATEWAY_CACHED_COMMAND = COMPUTE_HOST_ID + "," + "sudo " + COMMAND
+CACHED_COMMAND_RESULT = " Id Name State\n---\n 2 instance-00000003 running"
+RUN_RESULT = " Id Name State\n---\n 2 instance-00000002 running"
+FETCH_LINES_RESULT = [
+ " Id Name State",
+ "---",
+ " 2 instance-00000002 running"
+]
+
+LINES_FOR_FIX = [
+ "br-ex\t\t8000.005056acc9a2\tno\t\teno33554952",
+ "\t\t\t\t\t\t\tp_ff798dba-0",
+ "\t\t\t\t\t\t\tv_public",
+ "\t\t\t\t\t\t\tv_vrouter_pub",
+ "br-fw-admin\t\t8000.005056ace897\tno\t\teno16777728"
+]
+
+FIXED_LINES = [
+ "br-ex\t\t8000.005056acc9a2\tno\t\teno33554952,p_ff798dba-0,v_public,v_vrouter_pub",
+ "br-fw-admin\t\t8000.005056ace897\tno\t\teno16777728"
+]
+
+PARSED_CMD_RESULT = [
+ {
+ "bridge_id": "8000.005056acc9a2",
+ "bridge_name": "br-ex",
+ "interfaces": "eno33554952,p_ff798dba-0,v_public,v_vrouter_pub",
+ "stp_enabled": "no"
+ },
+ {
+ "bridge_id": "8000.005056ace897",
+ "bridge_name": "br-fw-admin",
+ "interfaces": "eno16777728",
+ "stp_enabled": "no"
+ }
+]
+
+LINE_FOR_PARSE = "br-ex\t\t8000.005056acc9a2\tno\t\teno33554952,p_ff798dba-0,v_public,v_vrouter_pub"
+PARSED_LINE = {
+ "bridge_id": "8000.005056acc9a2",
+ "bridge_name": "br-ex",
+ "interfaces": "eno33554952,p_ff798dba-0,v_public,v_vrouter_pub",
+ "stp_enabled": "no"
+}
+HEADERS = ["bridge_name", "bridge_id", "stp_enabled", "interfaces"]
diff --git a/app/test/fetch/cli_fetch/test_data/cli_fetch_host_pnics.py b/app/test/fetch/cli_fetch/test_data/cli_fetch_host_pnics.py
new file mode 100644
index 0000000..316c68a
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_data/cli_fetch_host_pnics.py
@@ -0,0 +1,147 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+PNICS_FOLDER_ID = "node-6.cisco.com-pnics"
+HOST_ID = "node-6.cisco.com"
+
+NETWORK_NODE = {
+ "host_type": [
+ "Controller",
+ "Network"
+ ],
+ "id": "node-6.cisco.com"
+}
+
+WRONG_NODE = {
+ "host_type": [
+ "Controller"
+ ]
+}
+
+INTERFACE_LINES = [
+ "lrwxrwxrwx 1 root 0 Jul 5 17:17 eno16777728 -> ../../devices/0000:02:00.0/net/eno16777728",
+ "lrwxrwxrwx 1 root 0 Jul 5 17:17 eno33554952 -> ../../devices/0000:02:01.0/net/eno33554952"
+]
+
+INTERFACE_NAMES = ["eno16777728", "eno33554952"]
+
+INTERFACE_NAME = INTERFACE_NAMES[0]
+IFCONFIG_CM_RESULT = [
+ "eno16777728 Link encap:Ethernet HWaddr 00:50:56:ac:e8:97 ",
+ " UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1",
+ " RX packets:409056348 errors:0 dropped:0 overruns:0 frame:0",
+ " TX packets:293898173 errors:0 dropped:0 overruns:0 carrier:0",
+ " collisions:0 txqueuelen:1000 ",
+ " RX bytes:103719003730 (103.7 GB) TX bytes:165090993470 (165.0 GB)",
+ ""
+]
+
+INTERFACE_DETAILS = {
+ "host": "node-6.cisco.com",
+ "id": "eno16777728-unknown_mac",
+ "lines": [],
+ "local_name": "eno16777728",
+ "name": "eno16777728",
+ "state": "UP"
+}
+
+MAC_ADDRESS_LINE = "eno16777728 Link encap:Ethernet HWaddr 00:50:56:ac:e8:97 "
+MAC_ADDRESS = "00:50:56:ac:e8:97"
+RAW_INTERFACE = {
+ "host": "node-6.cisco.com",
+ "lines": [],
+ "local_name": "eno16777728",
+ "name": "eno16777728"
+}
+
+INTERFACE_AFTER_LINE_HANDLE = {
+ "host": "node-6.cisco.com",
+ "lines": [MAC_ADDRESS_LINE.strip()],
+ "local_name": "eno16777728",
+ "name": "eno16777728",
+ "id": "eno16777728-" + MAC_ADDRESS,
+ "mac_address": MAC_ADDRESS
+}
+
+INTERFACE_FOR_SET = {
+ "host": "node-6.cisco.com",
+ "lines": [
+ "Link encap:Ethernet HWaddr 00:50:56:ac:e8:97",
+ "UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1"
+ ],
+ "local_name": "eno16777728",
+ "mac_address": "00:50:56:ac:e8:97"
+}
+
+INTERFACE_AFTER_SET = {
+ "host": "node-6.cisco.com",
+ "data": "Link encap:Ethernet HWaddr 00:50:56:ac:e8:97" +
+ "\nUP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1",
+ "local_name": "eno16777728",
+ "mac_address": "00:50:56:ac:e8:97",
+ "Supported ports": "[ TP ]",
+ "Supported link modes": ["10baseT/Half 10baseT/Full",
+ "100baseT/Half 100baseT/Full",
+ "1000baseT/Full"],
+ "Supported pause frame use": "No"
+}
+
+INTERFACE = {
+ "Advertised auto-negotiation": "Yes",
+ "Advertised link modes": [
+ "10baseT/Half 10baseT/Full",
+ "100baseT/Half 100baseT/Full",
+ "1000baseT/Full"
+ ],
+ "Advertised pause frame use": "No",
+ "Auto-negotiation": "on",
+ "Current message level": [
+ "0x00000007 (7)",
+ "drv probe link"
+ ],
+ "Duplex": "Full",
+ "Link detected": "yes",
+ "MDI-X": "off (auto)",
+ "PHYAD": "0",
+ "Port": "Twisted Pair",
+ "Speed": "1000Mb/s",
+ "Supported link modes": [
+ "10baseT/Half 10baseT/Full",
+ "100baseT/Half 100baseT/Full",
+ "1000baseT/Full"
+ ],
+ "Supported pause frame use": "No",
+ "Supported ports": "[ TP ]",
+ "Supports Wake-on": "d",
+ "Supports auto-negotiation": "Yes",
+ "Transceiver": "internal",
+ "Wake-on": "d",
+ "data": "Link encap:Ethernet HWaddr 00:50:56:ac:e8:97\nUP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1\nRX packets:408989052 errors:0 dropped:0 overruns:0 frame:0\nTX packets:293849880 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:1000\nRX bytes:103702814216 (103.7 GB) TX bytes:165063440009 (165.0 GB)\n",
+ "host": "node-6.cisco.com",
+ "id": "eno16777728-00:50:56:ac:e8:97",
+ "local_name": "eno16777728",
+ "mac_address": "00:50:56:ac:e8:97",
+ "name": "eno16777728"
+}
+
+INTERFACES_GET_RESULTS = [INTERFACE]
+
+IPV6_ADDRESS_LINE = " inet6 addr: fe80::f816:3eff:fea1:eb73/64 Scope:Link"
+IPV6_ADDRESS = "fe80::f816:3eff:fea1:eb73/64"
+IPV4_ADDRESS_LINE = " inet addr:172.16.13.2 Bcast:172.16.13.255 Mask:255.255.255.0"
+IPV4_ADDRESS = "172.16.13.2"
+
+ETHTOOL_RESULT = [
+ "Settings for eno16777728:",
+ "\tSupported ports: [ TP ]",
+ "\tSupported link modes: 10baseT/Half 10baseT/Full ",
+ "\t 100baseT/Half 100baseT/Full ",
+ "\t 1000baseT/Full ",
+ "\tSupported pause frame use: No",
+]
diff --git a/app/test/fetch/cli_fetch/test_data/cli_fetch_host_pnics_vpp.py b/app/test/fetch/cli_fetch/test_data/cli_fetch_host_pnics_vpp.py
new file mode 100644
index 0000000..99bd4cd
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_data/cli_fetch_host_pnics_vpp.py
@@ -0,0 +1,204 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+ID = "node-4.cisco.com-VPP-folders"
+VEDGES = [
+ {
+ "agent_type": "Open vSwitch agent",
+ "binary": "neutron-openvswitch-agent",
+ "configurations" : {
+ "tunneling_ip": "192.168.2.3",
+ "arp_responder_enabled" : True,
+ "extensions" : [
+
+ ],
+ "l2_population" : True,
+ "enable_distributed_routing" : False,
+ "bridge_mappings" : {
+ "physnet1": "br-floating"
+ },
+ "log_agent_heartbeats" : False,
+ "tunnel_types" : [
+ "vxlan"
+ ],
+ "in_distributed_mode" : False
+ },
+ "description" : None,
+ "environment": "Mirantis-Liberty-Xiaocong",
+ "host": "node-6.cisco.com",
+ "id": "1764430c-c09e-4717-86fa-c04350b1fcbb",
+ "id_path": "/Mirantis-Liberty-Xiaocong/Mirantis-Liberty-Xiaocong-regions/RegionOne/RegionOne-availability_zones/internal/node-6.cisco.com/node-6.cisco.com-vedges/1764430c-c09e-4717-86fa-c04350b1fcbb",
+ "name": "node-6.cisco.com-OVS",
+ "name_path": "/Mirantis-Liberty-Xiaocong/Regions/RegionOne/Availability Zones/internal/node-6.cisco.com/vEdges/node-6.cisco.com-OVS",
+ "object_name": "node-6.cisco.com-OVS",
+ "parent_id": "node-6.cisco.com-vedges",
+ "parent_type": "vedges_folder",
+ "ports" : {
+ "TenGigabitEthernet-g-63489f34-af" : {
+ "id": "8",
+ "name": "qg-63489f34-af",
+ "internal" : True,
+ "tag": "2",
+ "host": "node-4.cisco.com",
+ "state": "up"
+ },
+ "qr-3ff411a2-54" : {
+ "id": "7",
+ "name": "qr-3ff411a2-54",
+ "internal" : True,
+ "tag": "5"
+ },
+ "tap31c19fbe-5d" : {
+ "id": "19",
+ "name": "tap31c19fbe-5d",
+ "internal" : True,
+ "tag": "117"
+ },
+ "br-int" : {
+ "id": "3",
+ "name": "br-int",
+ "internal" : True
+ },
+ "qr-18f029db-77" : {
+ "id": "17",
+ "name": "qr-18f029db-77",
+ "internal" : True,
+ "tag": "105"
+ },
+ "br-tun" : {
+ "id": "13",
+ "name": "br-tun",
+ "internal" : True
+ },
+ "tap82d4992f-4d" : {
+ "id": "9",
+ "name": "tap82d4992f-4d",
+ "internal" : True,
+ "tag": "5"
+ },
+ "tap16620a58-c4" : {
+ "id": "16",
+ "name": "tap16620a58-c4",
+ "internal" : True,
+ "tag": "6"
+ },
+ "p_ff798dba-0" : {
+ "id": "15",
+ "name": "p_ff798dba-0",
+ "internal" : True
+ },
+ "tapee8e5dbb-03" : {
+ "id": "6",
+ "name": "tapee8e5dbb-03",
+ "internal" : True,
+ "tag": "1"
+ },
+ "tap702e9683-0c" : {
+ "id": "20",
+ "name": "tap702e9683-0c",
+ "internal" : True,
+ "tag": "118"
+ },
+ "tapaf69959f-ef" : {
+ "id": "18",
+ "name": "tapaf69959f-ef",
+ "internal" : True,
+ "tag": "105"
+ },
+ "tap5f22f397-d8" : {
+ "id": "11",
+ "name": "tap5f22f397-d8",
+ "internal" : True,
+ "tag": "3"
+ },
+ "qr-bb9b8340-72" : {
+ "id": "1",
+ "name": "qr-bb9b8340-72",
+ "internal" : True,
+ "tag": "3"
+ },
+ "qr-8733cc5d-b3" : {
+ "id": "2",
+ "name": "qr-8733cc5d-b3",
+ "internal" : True,
+ "tag": "4"
+ },
+ "ovs-system" : {
+ "id": "0",
+ "name": "ovs-system",
+ "internal" : True
+ },
+ "br-floating" : {
+ "id": "14",
+ "name": "br-floating",
+ "internal" : True
+ },
+ "qg-57e65d34-3d" : {
+ "id": "10",
+ "name": "qg-57e65d34-3d",
+ "internal" : True,
+ "tag": "2"
+ },
+ "qr-f7b44150-99" : {
+ "id": "4",
+ "name": "qr-f7b44150-99",
+ "internal" : True,
+ "tag": "1"
+ },
+ "tapbf16c3ab-56" : {
+ "id": "5",
+ "name": "tapbf16c3ab-56",
+ "internal" : True,
+ "tag": "4"
+ }
+ },
+ "show_in_tree" : True,
+ "topic": "N/A",
+ "tunnel_ports" : {
+ "br-tun" : {
+ "name": "br-tun",
+ "interface": "br-tun",
+ "type": "internal"
+ },
+ "vxlan-c0a80201" : {
+ "name": "vxlan-c0a80201",
+ "options" : {
+ "local_ip": "192.168.2.3",
+ "out_key": "flow",
+ "in_key": "flow",
+ "df_default": "True",
+ "remote_ip": "192.168.2.1"
+ },
+ "interface": "vxlan-c0a80201",
+ "type": "vxlan"
+ },
+ "vxlan-c0a80202" : {
+ "name": "vxlan-c0a80202",
+ "options" : {
+ "local_ip": "192.168.2.3",
+ "out_key": "flow",
+ "in_key": "flow",
+ "df_default": "True",
+ "remote_ip": "192.168.2.2"
+ },
+ "interface": "vxlan-c0a80202",
+ "type": "vxlan"
+ },
+ "patch-int" : {
+ "name": "patch-int",
+ "options" : {
+ "peer": "patch-tun"
+ },
+ "interface": "patch-int",
+ "type": "patch"
+ }
+ },
+ "type": "vedge"
+}
+ ]
\ No newline at end of file
diff --git a/app/test/fetch/cli_fetch/test_data/cli_fetch_host_verservices.py b/app/test/fetch/cli_fetch/test_data/cli_fetch_host_verservices.py
new file mode 100644
index 0000000..94ee38c
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_data/cli_fetch_host_verservices.py
@@ -0,0 +1,276 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+NETWORK_HOST = {
+ "config": {
+ "interfaces": 4,
+ "log_agent_heartbeats": False,
+ "gateway_external_network_id": "",
+ "router_id": "",
+ "interface_driver": "neutron.agent.linux.interface.OVSInterfaceDriver",
+ "ex_gw_ports": 2,
+ "routers": 2,
+ "handle_internal_only_routers": True,
+ "floating_ips": 1,
+ "external_network_bridge": "",
+ "use_namespaces": True,
+ "agent_mode": "legacy"
+ },
+ "environment": "Mirantis-Liberty-Xiaocong",
+ "host": "node-6.cisco.com",
+ "host_type": [
+ "Controller",
+ "Network"
+ ],
+ "id": "node-6.cisco.com",
+ "id_path": "/Mirantis-Liberty-Xiaocong/Mirantis-Liberty-Xiaocong-regions/RegionOne/RegionOne-availability_zones/internal/node-6.cisco.com",
+ "name": "node-6.cisco.com",
+ "name_path": "/Mirantis-Liberty-Xiaocong/Regions/RegionOne/Availability Zones/internal/node-6.cisco.com",
+ "object_name": "node-6.cisco.com",
+ "parent_id": "internal",
+ "parent_type": "availability_zone",
+ "services": {
+ "nova-scheduler": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:10.000000"
+ },
+ "nova-consoleauth": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:54.000000"
+ },
+ "nova-conductor": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:45.000000"
+ },
+ "nova-cert": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:56.000000"
+ }
+ },
+ "show_in_tree": True,
+ "type" : "host",
+ "zone" : "internal"
+}
+
+COMPUTE_HOST = {
+ "environment": "Mirantis-Liberty-Xiaocong",
+ "host": "node-5.cisco.com",
+ "host_type": [
+ "Compute"
+ ],
+ "id": "node-5.cisco.com",
+ "id_path": "/Mirantis-Liberty-Xiaocong/Mirantis-Liberty-Xiaocong-regions/RegionOne/RegionOne-availability_zones/osdna-zone/node-5.cisco.com",
+ "ip_address": "192.168.0.4",
+ "name": "node-5.cisco.com",
+ "name_path": "/Mirantis-Liberty-Xiaocong/Regions/RegionOne/Availability Zones/osdna-zone/node-5.cisco.com",
+ "object_name": "node-5.cisco.com",
+ "os_id": "1",
+ "parent_id": "osdna-zone",
+ "parent_type": "availability_zone",
+ "services": {
+ "nova-compute": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:42.000000"
+ }
+ },
+ "show_in_tree": True,
+ "type": "host",
+ "zone": "osdna-zone"
+}
+
+NAMESPACES = [
+ 'qdhcp-413de095-01ed-49dc-aa50-4479f43d390e',
+ 'qdhcp-2e3b85f4-756c-49d9-b34c-f3db13212dbc',
+ 'qdhcp-b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe',
+ 'qdhcp-eb276a62-15a9-4616-a192-11466fdd147f',
+ 'qdhcp-7e59b726-d6f4-451a-a574-c67a920ff627',
+ 'qdhcp-a55ff1e8-3821-4e5f-bcfd-07df93720a4f',
+ 'qdhcp-6504fcf7-41d7-40bb-aeb1-6a7658c105fc',
+ 'qrouter-9ec3d703-0725-47e3-8f48-02b16236caf9',
+ 'qrouter-49ac7716-06da-49ed-b388-f8ba60e8a0e6',
+ 'haproxy',
+ 'vrouter'
+]
+
+LOCAL_SERVICES_IDS = [
+ {
+ "local_service_id": "qdhcp-413de095-01ed-49dc-aa50-4479f43d390e"
+ },
+ {
+ "local_service_id": "qdhcp-2e3b85f4-756c-49d9-b34c-f3db13212dbc"
+ },
+ {
+ "local_service_id": "qdhcp-b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe"
+ },
+ {
+ "local_service_id": "qdhcp-eb276a62-15a9-4616-a192-11466fdd147f"
+ },
+ {
+ "local_service_id": "qdhcp-7e59b726-d6f4-451a-a574-c67a920ff627"
+ },
+ {
+ "local_service_id": "qdhcp-a55ff1e8-3821-4e5f-bcfd-07df93720a4f"
+ },
+ {
+ "local_service_id": "qdhcp-6504fcf7-41d7-40bb-aeb1-6a7658c105fc"
+ },
+ {
+ "local_service_id": "qrouter-9ec3d703-0725-47e3-8f48-02b16236caf9"
+ },
+ {
+ "local_service_id": "qrouter-49ac7716-06da-49ed-b388-f8ba60e8a0e6"
+ }
+]
+
+VSERVICE = {
+ "host": "node-6.cisco.com",
+ "id": "qdhcp-b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
+ "local_service_id": "qdhcp-b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
+ "name": "dhcp-osdna-met4",
+ "service_type": "dhcp"
+ }
+
+AGENT = {
+ "description": "DHCP agent",
+ "folder_text": "DHCP servers",
+ "type": "dhcp"
+}
+
+ROUTER = [
+ {"name": "123456"}
+]
+
+ID_CLEAN = "413de095-01ed-49dc-aa50-4479f43d390e"
+# functional test
+INPUT = "node-6.cisco.com"
+OUTPUT = [
+ {
+ "host": "node-6.cisco.com",
+ "id": "qdhcp-413de095-01ed-49dc-aa50-4479f43d390e",
+ "local_service_id": "qdhcp-413de095-01ed-49dc-aa50-4479f43d390e",
+ "master_parent_id": "node-6.cisco.com-vservices",
+ "master_parent_type": "vservices_folder",
+ "name": "dhcp-aiya",
+ "parent_id": "node-6.cisco.com-vservices-dhcps",
+ "parent_text": "DHCP servers",
+ "parent_type": "vservice_dhcps_folder",
+ "service_type": "dhcp"
+ },
+ {
+ "host": "node-6.cisco.com",
+ "id": "qdhcp-2e3b85f4-756c-49d9-b34c-f3db13212dbc",
+ "local_service_id": "qdhcp-2e3b85f4-756c-49d9-b34c-f3db13212dbc",
+ "master_parent_id": "node-6.cisco.com-vservices",
+ "master_parent_type": "vservices_folder",
+ "name": "dhcp-123456",
+ "parent_id": "node-6.cisco.com-vservices-dhcps",
+ "parent_text": "DHCP servers",
+ "parent_type": "vservice_dhcps_folder",
+ "service_type": "dhcp"
+ },
+ {
+ "host": "node-6.cisco.com",
+ "id": "qdhcp-b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
+ "local_service_id": "qdhcp-b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
+ "master_parent_id": "node-6.cisco.com-vservices",
+ "master_parent_type": "vservices_folder",
+ "name": "dhcp-osdna-met4",
+ "parent_id": "node-6.cisco.com-vservices-dhcps",
+ "parent_text": "DHCP servers",
+ "parent_type": "vservice_dhcps_folder",
+ "service_type": "dhcp"
+ },
+ {
+ "host": "node-6.cisco.com",
+ "id": "qdhcp-eb276a62-15a9-4616-a192-11466fdd147f",
+ "local_service_id": "qdhcp-eb276a62-15a9-4616-a192-11466fdd147f",
+ "master_parent_id": "node-6.cisco.com-vservices",
+ "master_parent_type": "vservices_folder",
+ "name": "dhcp-osdna-net3",
+ "parent_id": "node-6.cisco.com-vservices-dhcps",
+ "parent_text": "DHCP servers",
+ "parent_type": "vservice_dhcps_folder",
+ "service_type": "dhcp"
+ },
+ {
+ "host": "node-6.cisco.com",
+ "id": "qdhcp-7e59b726-d6f4-451a-a574-c67a920ff627",
+ "local_service_id": "qdhcp-7e59b726-d6f4-451a-a574-c67a920ff627",
+ "master_parent_id": "node-6.cisco.com-vservices",
+ "master_parent_type": "vservices_folder",
+ "name": "dhcp-osdna-net1",
+ "parent_id": "node-6.cisco.com-vservices-dhcps",
+ "parent_text": "DHCP servers",
+ "parent_type": "vservice_dhcps_folder",
+ "service_type": "dhcp"
+ },
+ {
+ "host": "node-6.cisco.com",
+ "id": "qdhcp-a55ff1e8-3821-4e5f-bcfd-07df93720a4f",
+ "local_service_id": "qdhcp-a55ff1e8-3821-4e5f-bcfd-07df93720a4f",
+ "master_parent_id": "node-6.cisco.com-vservices",
+ "master_parent_type": "vservices_folder",
+ "name": "dhcp-osdna-net2",
+ "parent_id": "node-6.cisco.com-vservices-dhcps",
+ "parent_text": "DHCP servers",
+ "parent_type": "vservice_dhcps_folder",
+ "service_type": "dhcp"
+ },
+ {
+ "host": "node-6.cisco.com",
+ "id": "qdhcp-6504fcf7-41d7-40bb-aeb1-6a7658c105fc",
+ "local_service_id": "qdhcp-6504fcf7-41d7-40bb-aeb1-6a7658c105fc",
+ "master_parent_id": "node-6.cisco.com-vservices",
+ "master_parent_type": "vservices_folder",
+ "name": "dhcp-admin_internal_net",
+ "parent_id": "node-6.cisco.com-vservices-dhcps",
+ "parent_text": "DHCP servers",
+ "parent_type": "vservice_dhcps_folder",
+ "service_type": "dhcp"
+ },
+ {
+ "admin_state_up": 1,
+ "enable_snat": 1,
+ "gw_port_id": "63489f34-af99-44f4-81de-9a2eb1c1941f",
+ "host": "node-6.cisco.com",
+ "id": "qrouter-9ec3d703-0725-47e3-8f48-02b16236caf9",
+ "local_service_id": "qrouter-9ec3d703-0725-47e3-8f48-02b16236caf9",
+ "master_parent_id": "node-6.cisco.com-vservices",
+ "master_parent_type": "vservices_folder",
+ "name": "router-osdna-router",
+ "parent_id": "node-6.cisco.com-vservices-routers",
+ "parent_text": "Gateways",
+ "parent_type": "vservice_routers_folder",
+ "service_type": "router",
+ "status": "ACTIVE",
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40"
+ },
+ {
+ "admin_state_up": 1,
+ "enable_snat": 1,
+ "gw_port_id": "57e65d34-3d87-4751-8e95-fc78847a3070",
+ "host": "node-6.cisco.com",
+ "id": "qrouter-49ac7716-06da-49ed-b388-f8ba60e8a0e6",
+ "local_service_id": "qrouter-49ac7716-06da-49ed-b388-f8ba60e8a0e6",
+ "master_parent_id": "node-6.cisco.com-vservices",
+ "master_parent_type": "vservices_folder",
+ "name": "router-router04",
+ "parent_id": "node-6.cisco.com-vservices-routers",
+ "parent_text": "Gateways",
+ "parent_type": "vservice_routers_folder",
+ "service_type": "router",
+ "status": "ACTIVE",
+ "tenant_id": "8c1751e0ce714736a63fee3c776164da"
+ }
+]
\ No newline at end of file
diff --git a/app/test/fetch/cli_fetch/test_data/cli_fetch_instance_vnics.py b/app/test/fetch/cli_fetch/test_data/cli_fetch_instance_vnics.py
new file mode 100644
index 0000000..a43b5c2
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_data/cli_fetch_instance_vnics.py
@@ -0,0 +1,288 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+VNICS_FOLDER = {
+ "create_object": True,
+ "environment": "Mirantis-Liberty-Xiaocong",
+ "id": "bf0cb914-b316-486c-a4ce-f22deb453c52-vnics",
+ "id_path": "/Mirantis-Liberty-Xiaocong/Mirantis-Liberty-Xiaocong-regions/RegionOne/RegionOne-availability_zones/osdna-zone/node-5.cisco.com/node-5.cisco.com-instances/bf0cb914-b316-486c-a4ce-f22deb453c52/bf0cb914-b316-486c-a4ce-f22deb453c52-vnics",
+ "name": "vNICs",
+ "name_path": "/Mirantis-Liberty-Xiaocong/Regions/RegionOne/Availability Zones/osdna-zone/node-5.cisco.com/Instances/test/vNICs",
+ "object_name": "vNICs",
+ "parent_id": "bf0cb914-b316-486c-a4ce-f22deb453c52",
+ "parent_type": "instance",
+ "show_in_tree": True,
+ "text": "vNICs",
+ "type": "vnics_folder"
+}
+
+INSATNCE = {
+ "_id": "5806817e4a0a8a3fbe3bee8b",
+ "children_url": "/osdna_dev/discover.py?type=tree&id=bf0cb914-b316-486c-a4ce-f22deb453c52",
+ "environment": "Mirantis-Liberty-Xiaocong",
+ "host": "node-5.cisco.com",
+ "id": "bf0cb914-b316-486c-a4ce-f22deb453c52",
+ "id_path": "/Mirantis-Liberty-Xiaocong/Mirantis-Liberty-Xiaocong-regions/RegionOne/RegionOne-availability_zones/osdna-zone/node-5.cisco.com/node-5.cisco.com-instances/bf0cb914-b316-486c-a4ce-f22deb453c52",
+ "ip_address": "192.168.0.4",
+ "local_name": "instance-00000026",
+ "mac_address": "fa:16:3e:e8:7f:04",
+ "name": "test",
+ "name_path": "/Mirantis-Liberty-Xiaocong/Regions/RegionOne/Availability Zones/osdna-zone/node-5.cisco.com/Instances/test",
+ "network": [
+ "2e3b85f4-756c-49d9-b34c-f3db13212dbc"
+ ],
+ "network_info": [
+ {
+ "devname": "tap1f72bd15-8a",
+ "id": "1f72bd15-8ab2-43cb-94d7-e823dd845255",
+ "profile": {
+
+ },
+ "vnic_type": "normal",
+ "type": "ovs",
+ "address": "fa:16:3e:e8:7f:04",
+ "qbg_params": None,
+ "network": {
+ "bridge": "br-int",
+ "label": "123456",
+ "subnets": [
+ {
+ "cidr": "172.16.13.0/24",
+ "version": 4,
+ "gateway": {
+ "version": 4,
+ "meta": {
+
+ },
+ "address": "172.16.13.1",
+ "type": "gateway"
+ },
+ "routes": [
+
+ ],
+ "dns": [
+
+ ],
+ "ips": [
+ {
+ "meta": {
+
+ },
+ "version": 4,
+ "type": "fixed",
+ "address": "172.16.13.4",
+ "floating_ips": [
+
+ ]
+ }
+ ],
+ "meta": {
+ "dhcp_server": "172.16.13.2"
+ }
+ }
+ ],
+ "meta": {
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "injected": False
+ },
+ "id": "2e3b85f4-756c-49d9-b34c-f3db13212dbc"
+ },
+ "active": True,
+ "meta": {
+
+ },
+ "details": {
+ "port_filter": True,
+ "ovs_hybrid_plug": True
+ },
+ "preserve_on_delete": False,
+ "qbh_params": None,
+ "ovs_interfaceid": "1f72bd15-8ab2-43cb-94d7-e823dd845255"
+ }
+ ],
+ "object_name": "test",
+ "parent_id": "node-5.cisco.com-instances",
+ "parent_type": "instances_folder",
+ "project_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "projects": [
+ "OSDNA-project"
+ ],
+ "show_in_tree": True,
+ "type": "instance",
+ "uuid": "bf0cb914-b316-486c-a4ce-f22deb453c52"
+}
+
+
+COMPUTE_HOST = {
+ "environment": "Mirantis-Liberty-Xiaocong",
+ "host": "node-5.cisco.com",
+ "host_type": [
+ "Compute"
+ ],
+ "id": "node-5.cisco.com",
+ "id_path": "/Mirantis-Liberty-Xiaocong/Mirantis-Liberty-Xiaocong-regions/RegionOne/RegionOne-availability_zones/osdna-zone/node-5.cisco.com",
+ "ip_address": "192.168.0.4",
+ "name": "node-5.cisco.com",
+ "name_path": "/Mirantis-Liberty-Xiaocong/Regions/RegionOne/Availability Zones/osdna-zone/node-5.cisco.com",
+ "object_name": "node-5.cisco.com",
+ "os_id": "1",
+ "parent_id": "osdna-zone",
+ "parent_type": "availability_zone",
+ "services": {
+ "nova-compute": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:42.000000"
+ }
+ },
+ "show_in_tree": True,
+ "type": "host",
+ "zone": "osdna-zone"
+}
+
+NETWORK_HOST = {
+ "config": {
+ "interfaces": 4,
+ "log_agent_heartbeats": False,
+ "gateway_external_network_id": "",
+ "router_id": "",
+ "interface_driver": "neutron.agent.linux.interface.OVSInterfaceDriver",
+ "ex_gw_ports": 2,
+ "routers": 2,
+ "handle_internal_only_routers": True,
+ "floating_ips": 1,
+ "external_network_bridge": "",
+ "use_namespaces": True,
+ "agent_mode": "legacy"
+ },
+ "environment": "Mirantis-Liberty-Xiaocong",
+ "host": "node-6.cisco.com",
+ "host_type": [
+ "Controller",
+ "Network"
+ ],
+ "id": "node-6.cisco.com",
+ "id_path": "/Mirantis-Liberty-Xiaocong/Mirantis-Liberty-Xiaocong-regions/RegionOne/RegionOne-availability_zones/internal/node-6.cisco.com",
+ "name": "node-6.cisco.com",
+ "name_path": "/Mirantis-Liberty-Xiaocong/Regions/RegionOne/Availability Zones/internal/node-6.cisco.com",
+ "object_name": "node-6.cisco.com",
+ "parent_id": "internal",
+ "parent_type": "availability_zone",
+ "services": {
+ "nova-scheduler": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:10.000000"
+ },
+ "nova-consoleauth": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:54.000000"
+ },
+ "nova-conductor": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:45.000000"
+ },
+ "nova-cert": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:56.000000"
+ }
+ },
+ "show_in_tree": True,
+ "type": "host",
+ "zone": "internal"
+}
+
+DUMPXML = "<domain type='qemu' id='38'>\n <name>instance-00000026</name>\n <uuid>bf0cb914-b316-486c-a4ce-f22deb453c52</uuid>\n <metadata>\n <nova:instance xmlns:nova=\"http://openstack.org/xmlns/libvirt/nova/1.0\">\n <nova:package version=\"12.0.0\"/>\n <nova:name>test</nova:name>\n <nova:creationTime>2016-10-17 22:37:43</nova:creationTime>\n <nova:flavor name=\"m1.micro\">\n <nova:memory>64</nova:memory>\n <nova:disk>0</nova:disk>\n <nova:swap>0</nova:swap>\n <nova:ephemeral>0</nova:ephemeral>\n <nova:vcpus>1</nova:vcpus>\n </nova:flavor>\n <nova:owner>\n <nova:user uuid=\"13baa553aae44adca6615e711fd2f6d9\">admin</nova:user>\n <nova:project uuid=\"75c0eb79ff4a42b0ae4973c8375ddf40\">OSDNA-project</nova:project>\n </nova:owner>\n <nova:root type=\"image\" uuid=\"c6f490c4-3656-43c6-8d03-b4e66bd249f9\"/>\n </nova:instance>\n </metadata>\n <memory unit='KiB'>65536</memory>\n <currentMemory unit='KiB'>65536</currentMemory>\n <vcpu placement='static'>1</vcpu>\n <cputune>\n <shares>1024</shares>\n </cputune>\n <sysinfo type='smbios'>\n <system>\n <entry name='manufacturer'>OpenStack Foundation</entry>\n <entry name='product'>OpenStack Nova</entry>\n <entry name='version'>12.0.0</entry>\n <entry name='serial'>9cf57bfd-7477-4671-b2d3-3dfeebfefb1d</entry>\n <entry name='uuid'>bf0cb914-b316-486c-a4ce-f22deb453c52</entry>\n <entry name='family'>Virtual Machine</entry>\n </system>\n </sysinfo>\n <os>\n <type arch='x86_64' machine='pc-i440fx-trusty'>hvm</type>\n <boot dev='hd'/>\n <smbios mode='sysinfo'/>\n </os>\n <features>\n <acpi/>\n <apic/>\n </features>\n <cpu>\n <topology sockets='1' cores='1' threads='1'/>\n </cpu>\n <clock offset='utc'/>\n <on_poweroff>destroy</on_poweroff>\n <on_reboot>restart</on_reboot>\n <on_crash>destroy</on_crash>\n <devices>\n <emulator>/usr/bin/qemu-system-x86_64</emulator>\n <disk type='file' device='disk'>\n <driver name='qemu' type='qcow2' cache='directsync'/>\n <source file='/var/lib/nova/instances/bf0cb914-b316-486c-a4ce-f22deb453c52/disk'/>\n <backingStore type='file' index='1'>\n <format type='raw'/>\n <source file='/var/lib/nova/instances/_base/44881e4441fbd821d0d6240f90742fc97e52f83e'/>\n <backingStore/>\n </backingStore>\n <target dev='vda' bus='virtio'/>\n <alias name='virtio-disk0'/>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>\n </disk>\n <controller type='usb' index='0'>\n <alias name='usb0'/>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>\n </controller>\n <controller type='pci' index='0' model='pci-root'>\n <alias name='pci.0'/>\n </controller>\n <interface type='bridge'>\n <mac address='fa:16:3e:e8:7f:04'/>\n <source bridge='qbr1f72bd15-8a'/>\n <target dev='tap1f72bd15-8a'/>\n <model type='virtio'/>\n <driver name='qemu'/>\n <alias name='net0'/>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>\n </interface>\n <serial type='file'>\n <source path='/var/lib/nova/instances/bf0cb914-b316-486c-a4ce-f22deb453c52/console.log'/>\n <target port='0'/>\n <alias name='serial0'/>\n </serial>\n <serial type='pty'>\n <source path='/dev/pts/8'/>\n <target port='1'/>\n <alias name='serial1'/>\n </serial>\n <console type='file'>\n <source path='/var/lib/nova/instances/bf0cb914-b316-486c-a4ce-f22deb453c52/console.log'/>\n <target type='serial' port='0'/>\n <alias name='serial0'/>\n </console>\n <input type='tablet' bus='usb'>\n <alias name='input0'/>\n </input>\n <input type='mouse' bus='ps2'/>\n <input type='keyboard' bus='ps2'/>\n <graphics type='vnc' port='5902' autoport='yes' 
listen='0.0.0.0' keymap='en-us'>\n <listen type='address' address='0.0.0.0'/>\n </graphics>\n <video>\n <model type='cirrus' vram='9216' heads='1'/>\n <alias name='video0'/>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>\n </video>\n <memballoon model='virtio'>\n <alias name='balloon0'/>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>\n <stats period='10'/>\n </memballoon>\n </devices>\n <seclabel type='dynamic' model='apparmor' relabel='yes'>\n <label>libvirt-bf0cb914-b316-486c-a4ce-f22deb453c52</label>\n <imagelabel>libvirt-bf0cb914-b316-486c-a4ce-f22deb453c52</imagelabel>\n </seclabel>\n</domain>\n\n"
+WRONG_DUMPXML = "<domain type='qemu' id='38'><uuid>wrong_instance</uuid></domain>"
+INSTANCES_LIST = [
+ ' Id Name State',
+ '----------------------------------------------------',
+ ' 2 instance-00000002 running',
+ ' 27 instance-0000001c running',
+ ' 38 instance-00000026 running',
+ ' 39 instance-00000028 running',
+ ''
+]
+
+VNIC = {
+ "@type": "bridge",
+ "address": {
+ "@bus": "0x00",
+ "@domain": "0x0000",
+ "@function": "0x0",
+ "@slot": "0x03",
+ "@type": "pci"
+ },
+ "alias": {
+ "@name": "net0"
+ },
+ "driver": {
+ "@name": "qemu"
+ },
+ "mac": {
+ "@address": "fa:16:3e:e8:7f:04"
+ },
+ "model": {
+ "@type": "virtio"
+ },
+ "source": {
+ "@bridge": "qbr1f72bd15-8a"
+ },
+ "target": {
+ "@dev": "tap1f72bd15-8a"
+ }
+}
+
+ID = "38"
+
+VNICS_FROM_DUMP_XML = [
+ {
+ "@type": "bridge",
+ "address": {
+ "@bus": "0x00",
+ "@domain": "0x0000",
+ "@function": "0x0",
+ "@slot": "0x03",
+ "@type": "pci"
+ },
+ "alias": {
+ "@name": "net0"
+ },
+ "driver": {
+ "@name": "qemu"
+ },
+ "host": "node-5.cisco.com",
+ "id": "tap1f72bd15-8a",
+ "instance_db_id": "5806817e4a0a8a3fbe3bee8b",
+ "instance_id": "bf0cb914-b316-486c-a4ce-f22deb453c52",
+ "mac": {
+ "@address": "fa:16:3e:e8:7f:04"
+ },
+ "mac_address": "fa:16:3e:e8:7f:04",
+ "model": {
+ "@type": "virtio"
+ },
+ "name": "tap1f72bd15-8a",
+ "source": {
+ "@bridge": "qbr1f72bd15-8a"
+ },
+ "source_bridge": "qbr1f72bd15-8a",
+ "target": {
+ "@dev": "tap1f72bd15-8a"
+ },
+ "vnic_type": "instance_vnic"
+ }
+]
+
+
+# functional test
+INPUT = "bf0cb914-b316-486c-a4ce-f22deb453c52-vnics" \ No newline at end of file
diff --git a/app/test/fetch/cli_fetch/test_data/cli_fetch_vconnectors.py b/app/test/fetch/cli_fetch/test_data/cli_fetch_vconnectors.py
new file mode 100644
index 0000000..f51e510
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_data/cli_fetch_vconnectors.py
@@ -0,0 +1,103 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+HOST = {
+ "config" : {
+ "metadata_proxy_socket" : "/opt/stack/data/neutron/metadata_proxy",
+ "nova_metadata_ip" : "192.168.20.14",
+ "log_agent_heartbeats" : False
+ },
+ "environment" : "Devstack-VPP-2",
+ "host" : "ubuntu0",
+ "host_type" : [
+ "Controller",
+ "Compute",
+ "Network"
+ ],
+ "id" : "ubuntu0",
+ "id_path" : "/Devstack-VPP-2/Devstack-VPP-2-regions/RegionOne/RegionOne-availability_zones/nova/ubuntu0",
+ "ip_address" : "192.168.20.14",
+ "name" : "ubuntu0",
+ "name_path" : "/Devstack-VPP-2/Regions/RegionOne/Availability Zones/nova/ubuntu0",
+ "object_name" : "ubuntu0",
+ "os_id" : "1",
+ "parent_id" : "nova",
+ "parent_type" : "availability_zone",
+ "services" : {
+ "nova-conductor" : {
+ "available" : True,
+ "active" : True,
+ "updated_at" : "2016-08-30T09:18:58.000000"
+ },
+ "nova-scheduler" : {
+ "available" : True,
+ "active" : True,
+ "updated_at" : "2016-08-30T09:18:54.000000"
+ },
+ "nova-consoleauth" : {
+ "available" : True,
+ "active" : True,
+ "updated_at" : "2016-08-30T09:18:54.000000"
+ }
+ },
+ "show_in_tree" : True,
+ "type" : "host",
+ "zone" : "nova"
+}
+
+WRONG_HOST = {
+ "show_in_tree" : True,
+ "type" : "host",
+ "zone" : "nova"
+}
+
+VCONNECTORS_FOLDER = {
+ "create_object" : True,
+ "environment" : "Mirantis-Liberty-Xiaocong",
+ "id" : "node-6.cisco.com-vconnectors",
+ "id_path" : "/Mirantis-Liberty-Xiaocong/Mirantis-Liberty-Xiaocong-regions/RegionOne/RegionOne-availability_zones/internal/node-6.cisco.com/node-6.cisco.com-vconnectors",
+ "name" : "vConnectors",
+ "name_path" : "/Mirantis-Liberty-Xiaocong/Regions/RegionOne/Availability Zones/internal/node-6.cisco.com/vConnectors",
+ "object_name" : "vConnectors",
+ "parent_id" : "node-6.cisco.com",
+ "parent_type" : "host",
+ "show_in_tree" : True,
+ "text" : "vConnectors",
+ "type" : "vconnectors_folder"
+}
+
+VCONNECTORS = [
+ {
+ "bd_id": "5678",
+ "host": "ubuntu0",
+ "id": "ubuntu0-vconnector-5678",
+ "interfaces": {
+ "name": {
+ "hardware": "VirtualEthernet0/0/8",
+ "id": "15",
+ "mac_address": "fa:16:3e:d1:98:73",
+ "name": "VirtualEthernet0/0/8",
+ "state": "up"
+ }
+ },
+ "interfaces_names": [
+ "TenGigabitEthernetc/0/0",
+ "VirtualEthernet0/0/0",
+ "VirtualEthernet0/0/1",
+ "VirtualEthernet0/0/2",
+ "VirtualEthernet0/0/3",
+ "VirtualEthernet0/0/4",
+ "VirtualEthernet0/0/5",
+ "VirtualEthernet0/0/6",
+ "VirtualEthernet0/0/7",
+ "VirtualEthernet0/0/8"
+ ],
+ "name": "bridge-domain-5678"
+ }
+]
\ No newline at end of file
diff --git a/app/test/fetch/cli_fetch/test_data/cli_fetch_vconnectors_ovs.py b/app/test/fetch/cli_fetch/test_data/cli_fetch_vconnectors_ovs.py
new file mode 100644
index 0000000..9161457
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_data/cli_fetch_vconnectors_ovs.py
@@ -0,0 +1,234 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+NETWORK_NODE = {
+ "config": {
+ "interfaces": 4,
+ "log_agent_heartbeats": False,
+ "gateway_external_network_id": "",
+ "router_id": "",
+ "interface_driver": "neutron.agent.linux.interface.OVSInterfaceDriver",
+ "ex_gw_ports": 2,
+ "routers": 2,
+ "handle_internal_only_routers": True,
+ "floating_ips": 1,
+ "external_network_bridge": "",
+ "use_namespaces": True,
+ "agent_mode": "legacy"
+ },
+ "environment": "Mirantis-Liberty-Xiaocong",
+ "host": "node-6.cisco.com",
+ "host_type": [
+ "Controller",
+ "Network"
+ ],
+ "id": "node-6.cisco.com",
+ "id_path": "/Mirantis-Liberty-Xiaocong/Mirantis-Liberty-Xiaocong-regions/RegionOne/RegionOne-availability_zones/internal/node-6.cisco.com",
+ "name": "node-6.cisco.com",
+ "name_path": "/Mirantis-Liberty-Xiaocong/Regions/RegionOne/Availability Zones/internal/node-6.cisco.com",
+ "object_name": "node-6.cisco.com",
+ "parent_id": "internal",
+ "parent_type": "availability_zone",
+ "services": {
+ "nova-scheduler": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:10.000000"
+ },
+ "nova-consoleauth": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:54.000000"
+ },
+ "nova-conductor": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:45.000000"
+ },
+ "nova-cert": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:56.000000"
+ }
+ },
+ "show_in_tree": True,
+ "type": "host",
+ "zone": "internal"
+}
+
+BRIDGE_RESULT = [
+ "bridge name\tbridge id\t\tSTP enabled\tinterfaces",
+ "br-ex\t\t8000.005056acc9a2\tno\t\teno33554952",
+ "\t\t\t\t\t\t\tp_ff798dba-0",
+ "\t\t\t\t\t\t\tv_public",
+ "\t\t\t\t\t\t\tv_vrouter_pub",
+ "br-fw-admin\t\t8000.005056ace897\tno\t\teno16777728",
+ "br-mesh\t\t8000.005056acc9a2\tno\t\teno33554952.103",
+ "br-mgmt\t\t8000.005056ace897\tno\t\teno16777728.101",
+ "\t\t\t\t\t\t\tmgmt-conntrd",
+ "\t\t\t\t\t\t\tv_management",
+ "\t\t\t\t\t\t\tv_vrouter",
+ "br-storage\t\t8000.005056ace897\tno\t\teno16777728.102"
+]
+
+FIXED_LINES = [
+ "br-ex\t\t8000.005056acc9a2\tno\t\teno33554952,p_ff798dba-0,v_public,v_vrouter_pub",
+ "br-fw-admin\t\t8000.005056ace897\tno\t\teno16777728",
+ "br-mesh\t\t8000.005056acc9a2\tno\t\teno33554952.103",
+ "br-mgmt\t\t8000.005056ace897\tno\t\teno16777728.101,mgmt-conntrd,v_management,v_vrouter",
+ "br-storage\t\t8000.005056ace897\tno\t\teno16777728.102"
+]
+
+PARSE_CM_RESULTS = [
+ {
+ "bridge_id": "8000.005056acc9a2",
+ "bridge_name": "br-ex",
+ "interfaces": "eno33554952,p_ff798dba-0,v_public,v_vrouter_pub",
+ "stp_enabled": "no"
+ },
+ {
+ "bridge_id": "8000.005056ace897",
+ "bridge_name": "br-fw-admin",
+ "interfaces": "eno16777728",
+ "stp_enabled": "no"
+ },
+ {
+ "bridge_id": "8000.005056acc9a2",
+ "bridge_name": "br-mesh",
+ "interfaces": "eno33554952.103",
+ "stp_enabled": "no"
+ },
+ {
+ "bridge_id": "8000.005056ace897",
+ "bridge_name": "br-mgmt",
+ "interfaces": "eno16777728.101,mgmt-conntrd,v_management,v_vrouter",
+ "stp_enabled": "no"
+ },
+ {
+ "bridge_id": "8000.005056ace897",
+ "bridge_name": "br-storage",
+ "interfaces": "eno16777728.102",
+ "stp_enabled": "no"
+ }
+]
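+
+# A minimal sketch (not part of the fixture, helper names are illustrative)
+# of how FIXED_LINES and PARSE_CM_RESULTS relate to BRIDGE_RESULT: the
+# continuation lines of 'brctl show' output are folded into one line per
+# bridge, and each folded line is then split into a dict. The production
+# fetcher under app/discover/fetchers may implement this differently.
+def fold_bridge_lines(lines):
+    fixed = []
+    for line in lines[1:]:          # skip the header line
+        if line.startswith("\t"):   # continuation: another interface of the last bridge
+            fixed[-1] += "," + line.strip()
+        else:
+            fixed.append(line)
+    return fixed                    # fold_bridge_lines(BRIDGE_RESULT) == FIXED_LINES
+
+
+def parse_bridge_line(line):
+    bridge_name, bridge_id, stp_enabled, interfaces = line.split()
+    return {"bridge_name": bridge_name, "bridge_id": bridge_id,
+            "stp_enabled": stp_enabled, "interfaces": interfaces}
+
+# [parse_bridge_line(line) for line in FIXED_LINES] == PARSE_CM_RESULTS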
+
+# functional test
+INPUT = "node-6.cisco.com"
+OUTPUT = [
+ {
+ "connector_type": "bridge",
+ "host": "node-6.cisco.com",
+ "id": "8000.005056acc9a2",
+ "interfaces": {
+ "eno33554952": {
+ "mac_address": "",
+ "name": "eno33554952"
+ },
+ "p_ff798dba-0": {
+ "mac_address": "",
+ "name": "p_ff798dba-0"
+ },
+ "v_public": {
+ "mac_address": "",
+ "name": "v_public"
+ },
+ "v_vrouter_pub": {
+ "mac_address": "",
+ "name": "v_vrouter_pub"
+ }
+ },
+ "interfaces_names": [
+ "p_ff798dba-0",
+ "v_public",
+ "v_vrouter_pub",
+ "eno33554952"
+ ],
+ "name": "br-ex",
+ "stp_enabled": "no"
+ },
+ {
+ "connector_type": "bridge",
+ "host": "node-6.cisco.com",
+ "id": "8000.005056ace897",
+ "interfaces": {
+ "eno16777728": {
+ "mac_address": "",
+ "name": "eno16777728"
+ }
+ },
+ "interfaces_names": [
+ "eno16777728"
+ ],
+ "name": "br-fw-admin",
+ "stp_enabled": "no"
+ },
+ {
+ "connector_type": "bridge",
+ "host": "node-6.cisco.com",
+ "id": "8000.005056acc9a2",
+ "interfaces": {
+ "eno33554952.103": {
+ "mac_address": "",
+ "name": "eno33554952.103"
+ }
+ },
+ "interfaces_names": [
+ "eno33554952.103"
+ ],
+ "name": "br-mesh",
+ "stp_enabled": "no"
+ },
+ {
+ "connector_type": "bridge",
+ "host": "node-6.cisco.com",
+ "id": "8000.005056ace897",
+ "interfaces": {
+ "eno16777728.101": {
+ "mac_address": "",
+ "name": "eno16777728.101"
+ },
+ "mgmt-conntrd": {
+ "mac_address": "",
+ "name": "mgmt-conntrd"
+ },
+ "v_management": {
+ "mac_address": "",
+ "name": "v_management"
+ },
+ "v_vrouter": {
+ "mac_address": "",
+ "name": "v_vrouter"
+ }
+ },
+ "interfaces_names": [
+ "v_management",
+ "mgmt-conntrd",
+ "v_vrouter",
+ "eno16777728.101"
+ ],
+ "name": "br-mgmt",
+ "stp_enabled": "no"
+ },
+ {
+ "connector_type": "bridge",
+ "host": "node-6.cisco.com",
+ "id": "8000.005056ace897",
+ "interfaces": {
+ "eno16777728.102": {
+ "mac_address": "",
+ "name": "eno16777728.102"
+ }
+ },
+ "interfaces_names": [
+ "eno16777728.102"
+ ],
+ "name": "br-storage",
+ "stp_enabled": "no"
+ }
+] \ No newline at end of file
diff --git a/app/test/fetch/cli_fetch/test_data/cli_fetch_vconnectors_vpp.py b/app/test/fetch/cli_fetch/test_data/cli_fetch_vconnectors_vpp.py
new file mode 100644
index 0000000..2c78b6a
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_data/cli_fetch_vconnectors_vpp.py
@@ -0,0 +1,137 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+HOST = {
+ "config" : {
+ "metadata_proxy_socket" : "/opt/stack/data/neutron/metadata_proxy",
+ "nova_metadata_ip" : "192.168.20.14",
+ "log_agent_heartbeats" : False
+ },
+ "environment" : "Devstack-VPP-2",
+ "host" : "ubuntu0",
+ "host_type" : [
+ "Controller",
+ "Compute",
+ "Network"
+ ],
+ "id" : "ubuntu0",
+ "id_path" : "/Devstack-VPP-2/Devstack-VPP-2-regions/RegionOne/RegionOne-availability_zones/nova/ubuntu0",
+ "ip_address" : "192.168.20.14",
+ "name" : "ubuntu0",
+ "name_path" : "/Devstack-VPP-2/Regions/RegionOne/Availability Zones/nova/ubuntu0",
+ "object_name" : "ubuntu0",
+ "os_id" : "1",
+ "parent_id" : "nova",
+ "parent_type" : "availability_zone",
+ "services" : {
+ "nova-conductor" : {
+ "available" : True,
+ "active" : True,
+ "updated_at" : "2016-08-30T09:18:58.000000"
+ },
+ "nova-scheduler" : {
+ "available" : True,
+ "active" : True,
+ "updated_at" : "2016-08-30T09:18:54.000000"
+ },
+ "nova-consoleauth" : {
+ "available" : True,
+ "active" : True,
+ "updated_at" : "2016-08-30T09:18:54.000000"
+ }
+ },
+ "show_in_tree" : True,
+ "type" : "host",
+ "zone" : "nova"
+}
+
+MODE_RESULT = [
+ "l3 local0 ",
+ "l3 pg/stream-0 ",
+ "l3 pg/stream-1 ",
+ "l3 pg/stream-2 ",
+ "l3 pg/stream-3 ",
+ "l2 bridge TenGigabitEthernetc/0/0 bd_id 5678 shg 0",
+ "l3 TenGigabitEthernetd/0/0 ",
+ "l2 bridge VirtualEthernet0/0/0 bd_id 5678 shg 0",
+ "l2 bridge VirtualEthernet0/0/1 bd_id 5678 shg 0",
+ "l2 bridge VirtualEthernet0/0/2 bd_id 5678 shg 0",
+ "l2 bridge VirtualEthernet0/0/3 bd_id 5678 shg 0",
+ "l2 bridge VirtualEthernet0/0/4 bd_id 5678 shg 0",
+ "l2 bridge VirtualEthernet0/0/5 bd_id 5678 shg 0",
+ "l2 bridge VirtualEthernet0/0/6 bd_id 5678 shg 0",
+ "l2 bridge VirtualEthernet0/0/7 bd_id 5678 shg 0",
+ "l2 bridge VirtualEthernet0/0/8 bd_id 5678 shg 0"
+]
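+
+# Sketch (assumption, not the fetcher's actual code) of how bridge-domain
+# membership can be read from the 'show mode' output above: every
+# 'l2 bridge' line contributes one interface to its bd_id group.
+def group_interfaces_by_bd(mode_lines):
+    bd_map = {}
+    for line in mode_lines:
+        tokens = line.split()
+        if len(tokens) >= 5 and tokens[:2] == ["l2", "bridge"]:
+            bd_map.setdefault(tokens[4], []).append(tokens[2])
+    return bd_map
+
+# group_interfaces_by_bd(MODE_RESULT)["5678"] lists the ten interfaces that
+# appear in interfaces_names of the functional-test output below.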
+
+INTERFACE_LINES = [
+ " Name Idx Link Hardware",
+ "TenGigabitEthernetc/0/0 5 up TenGigabitEthernetc/0/0",
+ " Ethernet address 00:25:b5:99:00:5c",
+ " Cisco VIC",
+ " carrier up full duplex speed 40000 mtu 1500 promisc",
+ " rx queues 1, rx desc 5120, tx queues 1, tx desc 2048",
+ " cpu socket 0",
+ "",
+ " tx frames ok 81404",
+ " tx bytes ok 6711404",
+ " rx frames ok 502521",
+ " rx bytes ok 668002732",
+ " rx missed 64495",
+ " extended stats:",
+ " rx good packets 502521",
+ " tx good packets 81404",
+ " rx good bytes 668002732",
+ " tx good bytes 6711404"
+]
+
+INTERFACE_NAME = "TenGigabitEthernetc/0/0"
+
+GET_INTERFACE_DETAIL = {
+ "hardware": "TenGigabitEthernetc/0/0",
+ "id": "5",
+ "mac_address": "00:25:b5:99:00:5c",
+ "name": "TenGigabitEthernetc/0/0",
+ "state": "up"
+}
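+
+# Sketch (helper name is illustrative) of how GET_INTERFACE_DETAIL can be
+# derived from the 'show hardware'-style INTERFACE_LINES above: the row that
+# starts with the interface name carries id/state/hardware, and the
+# following 'Ethernet address' line carries the MAC.
+def get_interface_detail(lines, name):
+    detail = None
+    for line in lines:
+        tokens = line.split()
+        if tokens and tokens[0] == name:
+            detail = {"name": tokens[0], "id": tokens[1], "state": tokens[2],
+                      "hardware": tokens[3], "mac_address": ""}
+        elif detail and line.strip().startswith("Ethernet address"):
+            detail["mac_address"] = line.split()[-1]
+            break
+    return detail
+
+# get_interface_detail(INTERFACE_LINES, INTERFACE_NAME) == GET_INTERFACE_DETAIL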
+
+# functional test
+# environment: Devstack-VPP-2
+# inventory name: vpp
+
+INPUT = "ubuntu0"
+OUTPUT = [
+ {
+ "bd_id": "5678",
+ "host": "ubuntu0",
+ "id": "ubuntu0-vconnector-5678",
+ "interfaces": {
+ "name": {
+ "hardware": "VirtualEthernet0/0/8",
+ "id": "15",
+ "mac_address": "fa:16:3e:d1:98:73",
+ "name": "VirtualEthernet0/0/8",
+ "state": "up"
+ }
+ },
+ "interfaces_names": [
+ "TenGigabitEthernetc/0/0",
+ "VirtualEthernet0/0/0",
+ "VirtualEthernet0/0/1",
+ "VirtualEthernet0/0/2",
+ "VirtualEthernet0/0/3",
+ "VirtualEthernet0/0/4",
+ "VirtualEthernet0/0/5",
+ "VirtualEthernet0/0/6",
+ "VirtualEthernet0/0/7",
+ "VirtualEthernet0/0/8"
+ ],
+ "name": "bridge-domain-5678"
+ }
+] \ No newline at end of file
diff --git a/app/test/fetch/cli_fetch/test_data/cli_fetch_vservice_vnics.py b/app/test/fetch/cli_fetch/test_data/cli_fetch_vservice_vnics.py
new file mode 100644
index 0000000..0b60af5
--- /dev/null
+++ b/app/test/fetch/cli_fetch/test_data/cli_fetch_vservice_vnics.py
@@ -0,0 +1,616 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+NETWORK_NODE = {
+ "config": {
+ "interfaces": 4,
+ "log_agent_heartbeats": False,
+ "gateway_external_network_id": "",
+ "router_id": "",
+ "interface_driver": "neutron.agent.linux.interface.OVSInterfaceDriver",
+ "ex_gw_ports": 2,
+ "routers": 2,
+ "handle_internal_only_routers": True,
+ "floating_ips": 1,
+ "external_network_bridge": "",
+ "use_namespaces": True,
+ "agent_mode": "legacy"
+ },
+ "environment": "Mirantis-Liberty-Xiaocong",
+ "host": "node-6.cisco.com",
+ "host_type": [
+ "Controller",
+ "Network"
+ ],
+ "id": "node-6.cisco.com",
+ "id_path": "/Mirantis-Liberty-Xiaocong/Mirantis-Liberty-Xiaocong-regions/RegionOne/RegionOne-availability_zones/internal/node-6.cisco.com",
+ "name": "node-6.cisco.com",
+ "name_path": "/Mirantis-Liberty-Xiaocong/Regions/RegionOne/Availability Zones/internal/node-6.cisco.com",
+ "object_name": "node-6.cisco.com",
+ "parent_id": "internal",
+ "parent_type": "availability_zone",
+ "services": {
+ "nova-scheduler": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:10.000000"
+ },
+ "nova-consoleauth": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:54.000000"
+ },
+ "nova-conductor": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:45.000000"
+ },
+ "nova-cert": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:56.000000"
+ }
+ },
+ "show_in_tree": True,
+ "type": "host",
+ "zone": "internal"
+}
+
+COMPUTE_NODE = {
+ "environment": "Mirantis-Liberty-Xiaocong",
+ "host": "node-5.cisco.com",
+ "host_type": [
+ "Compute"
+ ],
+ "id": "node-5.cisco.com",
+ "id_path": "/Mirantis-Liberty-Xiaocong/Mirantis-Liberty-Xiaocong-regions/RegionOne/RegionOne-availability_zones/osdna-zone/node-5.cisco.com",
+ "ip_address": "192.168.0.4",
+ "name": "node-5.cisco.com",
+ "name_path": "/Mirantis-Liberty-Xiaocong/Regions/RegionOne/Availability Zones/osdna-zone/node-5.cisco.com",
+ "object_name": "node-5.cisco.com",
+ "os_id": "1",
+ "parent_id": "osdna-zone",
+ "parent_type": "availability_zone",
+ "services": {
+ "nova-compute": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:42.000000"
+ }
+ },
+ "show_in_tree": True,
+ "type": "host",
+ "zone": "osdna-zone"
+}
+
+ERROR_NODE = {
+ "environment": "Mirantis-Liberty-Xiaocong",
+ "host": "node-5.cisco.com",
+ "id": "node-5.cisco.com",
+ "id_path": "/Mirantis-Liberty-Xiaocong/Mirantis-Liberty-Xiaocong-regions/RegionOne/RegionOne-availability_zones/osdna-zone/node-5.cisco.com",
+ "ip_address": "192.168.0.4",
+ "name": "node-5.cisco.com",
+ "name_path": "/Mirantis-Liberty-Xiaocong/Regions/RegionOne/Availability Zones/osdna-zone/node-5.cisco.com",
+ "object_name": "node-5.cisco.com",
+ "os_id": "1",
+ "parent_id": "osdna-zone",
+ "parent_type": "availability_zone",
+ "services": {
+ "nova-compute": {
+ "active": True,
+ "available": True,
+ "updated_at": "2016-10-21T18:01:42.000000"
+ }
+ },
+ "show_in_tree": True,
+ "type": "host",
+ "zone": "osdna-zone"
+}
+
+NAME_SPACES = [
+ 'qdhcp-8673c48a-f137-4497-b25d-08b7b218fd17',
+ 'qdhcp-0abe6331-0d74-4bbd-ad89-a5719c3793e4',
+ 'qdhcp-413de095-01ed-49dc-aa50-4479f43d390e',
+ 'qdhcp-2e3b85f4-756c-49d9-b34c-f3db13212dbc',
+ 'qdhcp-b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe',
+ 'qdhcp-eb276a62-15a9-4616-a192-11466fdd147f',
+ 'qdhcp-7e59b726-d6f4-451a-a574-c67a920ff627',
+ 'qdhcp-a55ff1e8-3821-4e5f-bcfd-07df93720a4f',
+ 'qdhcp-6504fcf7-41d7-40bb-aeb1-6a7658c105fc',
+ 'qrouter-9ec3d703-0725-47e3-8f48-02b16236caf9',
+ 'qrouter-49ac7716-06da-49ed-b388-f8ba60e8a0e6',
+ 'haproxy',
+ 'vrouter'
+]
+
+SERVICE_ID = 'qdhcp-8673c48a-f137-4497-b25d-08b7b218fd17'
+
+SERVICES = [
+ {
+ "IP Address": "172.16.13.2",
+ "IPv6 Address": "fe80::f816:3eff:fea1:eb73/64",
+ "cidr": "172.16.13.0/24",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:a1:eb:73\ninet addr:172.16.13.2 Bcast:172.16.13.255 Mask:255.255.255.0\ninet6 addr: fe80::f816:3eff:fea1:eb73/64 Scope:Link\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:28 errors:0 dropped:35 overruns:0 frame:0\nTX packets:8 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:4485 (4.4 KB) TX bytes:648 (648.0 B)\n",
+ "host": "node-6.cisco.com",
+ "id": "tapa68b2627-a1",
+ "mac_address": "fa:16:3e:a1:eb:73",
+ "master_parent_id": "qdhcp-8673c48a-f137-4497-b25d-08b7b218fd17",
+ "master_parent_type": "vservice",
+ "name": "tapa68b2627-a1",
+ "netmask": "255.255.255.0",
+ "network": "8673c48a-f137-4497-b25d-08b7b218fd17",
+ "parent_id": "qdhcp-8673c48a-f137-4497-b25d-08b7b218fd17-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ }
+]
+
+NET_MASK_ARRAY = ["255", "255", "255", "0"]
+SIZE = '24'
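+
+# Sketch (assumption, not the fetcher's actual helper) of how the prefix
+# length SIZE can be derived from NET_MASK_ARRAY: count the set bits of each
+# dotted-decimal octet of the netmask.
+def get_net_size(net_mask_array):
+    return str(sum(bin(int(octet)).count("1") for octet in net_mask_array))
+
+# get_net_size(NET_MASK_ARRAY) == SIZE  # '24'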
+
+VNIC = {
+ "IP Address": "172.16.13.2",
+ "IPv6 Address": "fe80::f816:3eff:fea1:eb73/64",
+ "host": "node-6.cisco.com",
+ "id": "tapa68b2627-a1",
+ "lines": [
+ "Link encap:Ethernet HWaddr fa:16:3e:a1:eb:73",
+ "inet addr:172.16.13.2 Bcast:172.16.13.255 Mask:255.255.255.0",
+ "inet6 addr: fe80::f816:3eff:fea1:eb73/64 Scope:Link",
+ "UP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1",
+ "RX packets:28 errors:0 dropped:35 overruns:0 frame:0",
+ "TX packets:8 errors:0 dropped:0 overruns:0 carrier:0",
+ "collisions:0 txqueuelen:0",
+ "RX bytes:4485 (4.4 KB) TX bytes:648 (648.0 B)",
+ ""
+ ],
+ "mac_address": "fa:16:3e:a1:eb:73",
+ "master_parent_id": "qdhcp-8673c48a-f137-4497-b25d-08b7b218fd17",
+ "master_parent_type": "vservice",
+ "name": "tapa68b2627-a1",
+ "netmask": "255.255.255.0",
+ "parent_id": "qdhcp-8673c48a-f137-4497-b25d-08b7b218fd17-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+}
+
+RAW_VNIC = {
+ "host": "node-6.cisco.com",
+ "id": "tapa68b2627-a1",
+ "lines": [],
+ "master_parent_id": "qdhcp-8673c48a-f137-4497-b25d-08b7b218fd17",
+ "master_parent_type": "vservice",
+ "name": "tapa68b2627-a1",
+ "parent_id": "qdhcp-8673c48a-f137-4497-b25d-08b7b218fd17-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+}
+
+NETWORK = [{
+ "admin_state_up": True,
+ "cidrs": [
+ "172.16.13.0/24"
+ ],
+ "environment": "Mirantis-Liberty-Xiaocong",
+ "id": "8673c48a-f137-4497-b25d-08b7b218fd17",
+ "id_path": "/Mirantis-Liberty-Xiaocong/Mirantis-Liberty-Xiaocong-projects/75c0eb79ff4a42b0ae4973c8375ddf40/75c0eb79ff4a42b0ae4973c8375ddf40-networks/8673c48a-f137-4497-b25d-08b7b218fd17",
+ "mtu": 1400,
+ "name": "25",
+ "name_path": "/Mirantis-Liberty-Xiaocong/Projects/OSDNA-project/Networks/25",
+ "network": "8673c48a-f137-4497-b25d-08b7b218fd17",
+ "object_name": "25",
+ "parent_id": "75c0eb79ff4a42b0ae4973c8375ddf40-networks",
+ "parent_text": "Networks",
+ "parent_type": "networks_folder",
+ "port_security_enabled": True,
+ "project": "OSDNA-project",
+ "provider:network_type": "vxlan",
+ "provider:physical_network": None,
+ "provider:segmentation_id": 52,
+ "router:external": False,
+ "shared": False,
+ "show_in_tree": True,
+ "status": "ACTIVE",
+ "subnets": {
+ "123e": {
+ "ip_version": 4,
+ "enable_dhcp": True,
+ "gateway_ip": "172.16.13.1",
+ "id": "fcfa62ec-5ae7-46ce-9259-5f30de7af858",
+ "ipv6_ra_mode": None,
+ "name": "123e",
+ "dns_nameservers": [
+
+ ],
+ "cidr" : "172.16.13.0/24",
+ "subnetpool_id": None,
+ "ipv6_address_mode": None,
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "network_id": "8673c48a-f137-4497-b25d-08b7b218fd17",
+ "host_routes": [
+
+ ],
+ "allocation_pools": [
+ {
+ "start": "172.16.13.2",
+ "end": "172.16.13.254"
+ }
+ ]
+ }
+ },
+ "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "network"
+}]
+
+VSERVICE = {
+ "children_url": "/osdna_dev/discover.py?type=tree&id=qdhcp-8673c48a-f137-4497-b25d-08b7b218fd17",
+ "environment": "Mirantis-Liberty-Xiaocong",
+ "host": "node-6.cisco.com",
+ "id": "qdhcp-8673c48a-f137-4497-b25d-08b7b218fd17",
+ "id_path": "/Mirantis-Liberty-Xiaocong/Mirantis-Liberty-Xiaocong-regions/RegionOne/RegionOne-availability_zones/internal/node-6.cisco.com/node-6.cisco.com-vservices/node-6.cisco.com-vservices-dhcps/qdhcp-8673c48a-f137-4497-b25d-08b7b218fd17",
+ "local_service_id": "qdhcp-8673c48a-f137-4497-b25d-08b7b218fd17",
+ "name": "dhcp-25",
+ "name_path": "/Mirantis-Liberty-Xiaocong/Regions/RegionOne/Availability Zones/internal/node-6.cisco.com/Vservices/DHCP servers/dhcp-25",
+ "network": [
+ "8673c48a-f137-4497-b25d-08b7b218fd17"
+ ],
+ "object_name": "dhcp-25",
+ "parent_id": "node-6.cisco.com-vservices-dhcps",
+ "parent_text": "DHCP servers",
+ "parent_type": "vservice_dhcps_folder",
+ "service_type": "dhcp",
+ "show_in_tree": True,
+ "type": "vservice"
+}
+
+
+CIDR = "172.16.13.0/24"
+
+IFCONFIG_RESULT = [
+ "lo Link encap:Local Loopback ",
+ " inet addr:127.0.0.1 Mask:255.0.0.0",
+ " inet6 addr: ::1/128 Scope:Host",
+ " UP LOOPBACK RUNNING MTU:65536 Metric:1",
+ " RX packets:0 errors:0 dropped:0 overruns:0 frame:0",
+ " TX packets:0 errors:0 dropped:0 overruns:0 carrier:0",
+ " collisions:0 txqueuelen:0 ",
+ " RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)",
+ "",
+ "tapa68b2627-a1 Link encap:Ethernet HWaddr fa:16:3e:a1:eb:73 ",
+ " inet addr:172.16.13.2 Bcast:172.16.13.255 Mask:255.255.255.0",
+ " inet6 addr: fe80::f816:3eff:fea1:eb73/64 Scope:Link",
+ " UP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1",
+ " RX packets:28 errors:0 dropped:35 overruns:0 frame:0",
+ " TX packets:8 errors:0 dropped:0 overruns:0 carrier:0",
+ " collisions:0 txqueuelen:0 ",
+ " RX bytes:4485 (4.4 KB) TX bytes:648 (648.0 B)",
+ ""
+]
+
+MAC_ADDRESS_LINE = "tapa68b2627-a1 Link encap:Ethernet HWaddr 00:50:56:ac:e8:97 "
+MAC_ADDRESS = "00:50:56:ac:e8:97"
+IPV6_ADDRESS_LINE = " inet6 addr: fe80::f816:3eff:fea1:eb73/64 Scope:Link"
+IPV6_ADDRESS = "fe80::f816:3eff:fea1:eb73/64"
+IPV4_ADDRESS_LINE = " inet addr:172.16.13.2 Bcast:172.16.13.255 Mask:255.255.255.0"
+IPV4_ADDRESS = "172.16.13.2"
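+
+# Sketch, assuming the ifconfig layout shown above, of how the MAC, IPv4 and
+# IPv6 values are pulled out of the corresponding lines; the fetcher's real
+# parsing code may differ.
+def parse_mac(line):
+    return line.split("HWaddr")[1].strip()            # MAC_ADDRESS_LINE -> MAC_ADDRESS
+
+
+def parse_ipv4(line):
+    return line.split("inet addr:")[1].split()[0]     # IPV4_ADDRESS_LINE -> IPV4_ADDRESS
+
+
+def parse_ipv6(line):
+    return line.split("inet6 addr:")[1].split()[0]    # IPV6_ADDRESS_LINE -> IPV6_ADDRESS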
+
+# functional test
+INPUT = "node-6.cisco.com"
+OUTPUT = [
+ {
+ "IP Address": "172.16.13.2",
+ "IPv6 Address": "fe80::f816:3eff:fea1:eb73/64",
+ "cidr": "172.16.13.0/24",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:a1:eb:73\ninet addr:172.16.13.2 Bcast:172.16.13.255 Mask:255.255.255.0\ninet6 addr: fe80::f816:3eff:fea1:eb73/64 Scope:Link\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:28 errors:0 dropped:35 overruns:0 frame:0\nTX packets:8 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:4485 (4.4 KB) TX bytes:648 (648.0 B)\n",
+ "host": "node-6.cisco.com",
+ "id": "tapa68b2627-a1",
+ "mac_address": "fa:16:3e:a1:eb:73",
+ "master_parent_id": "qdhcp-8673c48a-f137-4497-b25d-08b7b218fd17",
+ "master_parent_type": "vservice",
+ "name": "tapa68b2627-a1",
+ "netmask": "255.255.255.0",
+ "network": "8673c48a-f137-4497-b25d-08b7b218fd17",
+ "parent_id": "qdhcp-8673c48a-f137-4497-b25d-08b7b218fd17-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ },
+ {
+ "IP Address": "172.16.12.2",
+ "IPv6 Address": "fe80::f816:3eff:fec1:7f19/64",
+ "cidr": "172.16.12.0/24",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:c1:7f:19\ninet addr:172.16.12.2 Bcast:172.16.12.255 Mask:255.255.255.0\ninet6 addr: fe80::f816:3eff:fec1:7f19/64 Scope:Link\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:6 errors:0 dropped:8 overruns:0 frame:0\nTX packets:8 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:360 (360.0 B) TX bytes:648 (648.0 B)\n",
+ "host": "node-6.cisco.com",
+ "id": "tape67d81de-48",
+ "mac_address": "fa:16:3e:c1:7f:19",
+ "master_parent_id": "qdhcp-0abe6331-0d74-4bbd-ad89-a5719c3793e4",
+ "master_parent_type": "vservice",
+ "name": "tape67d81de-48",
+ "netmask": "255.255.255.0",
+ "network": "0abe6331-0d74-4bbd-ad89-a5719c3793e4",
+ "parent_id": "qdhcp-0abe6331-0d74-4bbd-ad89-a5719c3793e4-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ },
+ {
+ "IP Address": "172.16.10.2",
+ "IPv6 Address": "fe80::f816:3eff:fe23:1b94/64",
+ "cidr": "172.16.10.0/25",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:23:1b:94\ninet addr:172.16.10.2 Bcast:172.16.10.127 Mask:255.255.255.128\ninet6 addr: fe80::f816:3eff:fe23:1b94/64 Scope:Link\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:51 errors:0 dropped:12 overruns:0 frame:0\nTX packets:8 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:9161 (9.1 KB) TX bytes:648 (648.0 B)\n",
+ "host": "node-6.cisco.com",
+ "id": "tapa1bf631f-de",
+ "mac_address": "fa:16:3e:23:1b:94",
+ "master_parent_id": "qdhcp-413de095-01ed-49dc-aa50-4479f43d390e",
+ "master_parent_type": "vservice",
+ "name": "tapa1bf631f-de",
+ "netmask": "255.255.255.128",
+ "network": "413de095-01ed-49dc-aa50-4479f43d390e",
+ "parent_id": "qdhcp-413de095-01ed-49dc-aa50-4479f43d390e-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ },
+ {
+ "IP Address": "172.16.13.2",
+ "IPv6 Address": "fe80::f816:3eff:fec3:c871/64",
+ "cidr": "172.16.13.0/24",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:c3:c8:71\ninet addr:172.16.13.2 Bcast:172.16.13.255 Mask:255.255.255.0\ninet6 addr: fe80::f816:3eff:fec3:c871/64 Scope:Link\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:4614 errors:0 dropped:4 overruns:0 frame:0\nTX packets:4459 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:823296 (823.2 KB) TX bytes:929712 (929.7 KB)\n",
+ "host": "node-6.cisco.com",
+ "id": "tapaf69959f-ef",
+ "mac_address": "fa:16:3e:c3:c8:71",
+ "master_parent_id": "qdhcp-2e3b85f4-756c-49d9-b34c-f3db13212dbc",
+ "master_parent_type": "vservice",
+ "name": "tapaf69959f-ef",
+ "netmask": "255.255.255.0",
+ "network": "8673c48a-f137-4497-b25d-08b7b218fd17",
+ "parent_id": "qdhcp-2e3b85f4-756c-49d9-b34c-f3db13212dbc-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ },
+ {
+ "IP Address": "172.16.4.2",
+ "IPv6 Address": "fe80::f816:3eff:fed7:c516/64",
+ "cidr": "172.16.4.0/24",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:d7:c5:16\ninet addr:172.16.4.2 Bcast:172.16.4.255 Mask:255.255.255.0\ninet6 addr: fe80::f816:3eff:fed7:c516/64 Scope:Link\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:56928 errors:0 dropped:15 overruns:0 frame:0\nTX packets:56675 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:10526014 (10.5 MB) TX bytes:12041070 (12.0 MB)\n",
+ "host": "node-6.cisco.com",
+ "id": "tap16620a58-c4",
+ "mac_address": "fa:16:3e:d7:c5:16",
+ "master_parent_id": "qdhcp-b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
+ "master_parent_type": "vservice",
+ "name": "tap16620a58-c4",
+ "netmask": "255.255.255.0",
+ "network": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
+ "parent_id": "qdhcp-b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ },
+ {
+ "IP Address": "172.16.2.2",
+ "IPv6 Address": "fe80::f816:3eff:feeb:39c2/64",
+ "cidr": "172.16.2.0/24",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:eb:39:c2\ninet addr:172.16.2.2 Bcast:172.16.2.255 Mask:255.255.255.0\ninet6 addr: fe80::f816:3eff:feeb:39c2/64 Scope:Link\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:93317 errors:0 dropped:57 overruns:0 frame:0\nTX packets:93264 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:17406098 (17.4 MB) TX bytes:19958079 (19.9 MB)\n",
+ "host": "node-6.cisco.com",
+ "id": "tap82d4992f-4d",
+ "mac_address": "fa:16:3e:eb:39:c2",
+ "master_parent_id": "qdhcp-eb276a62-15a9-4616-a192-11466fdd147f",
+ "master_parent_type": "vservice",
+ "name": "tap82d4992f-4d",
+ "netmask": "255.255.255.0",
+ "network": "eb276a62-15a9-4616-a192-11466fdd147f",
+ "parent_id": "qdhcp-eb276a62-15a9-4616-a192-11466fdd147f-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ },
+ {
+ "IP Address": "172.16.3.2",
+ "IPv6 Address": "fe80::f816:3eff:fe1c:9936/64",
+ "cidr": "172.16.3.0/24",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:1c:99:36\ninet addr:172.16.3.2 Bcast:172.16.3.255 Mask:255.255.255.0\ninet6 addr: fe80::f816:3eff:fe1c:9936/64 Scope:Link\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:170894 errors:0 dropped:41 overruns:0 frame:0\nTX packets:170588 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:31784458 (31.7 MB) TX bytes:36444046 (36.4 MB)\n",
+ "host": "node-6.cisco.com",
+ "id": "tap5f22f397-d8",
+ "mac_address": "fa:16:3e:1c:99:36",
+ "master_parent_id": "qdhcp-7e59b726-d6f4-451a-a574-c67a920ff627",
+ "master_parent_type": "vservice",
+ "name": "tap5f22f397-d8",
+ "netmask": "255.255.255.0",
+ "network": "7e59b726-d6f4-451a-a574-c67a920ff627",
+ "parent_id": "qdhcp-7e59b726-d6f4-451a-a574-c67a920ff627-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ },
+ {
+ "IP Address": "172.16.1.2",
+ "IPv6 Address": "fe80::f816:3eff:fe59:5fff/64",
+ "cidr": "172.16.1.0/24",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:59:5f:ff\ninet addr:172.16.1.2 Bcast:172.16.1.255 Mask:255.255.255.0\ninet6 addr: fe80::f816:3eff:fe59:5fff/64 Scope:Link\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:93468 errors:0 dropped:38 overruns:0 frame:0\nTX packets:93452 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:17416578 (17.4 MB) TX bytes:19972565 (19.9 MB)\n",
+ "host": "node-6.cisco.com",
+ "id": "tapbf16c3ab-56",
+ "mac_address": "fa:16:3e:59:5f:ff",
+ "master_parent_id": "qdhcp-a55ff1e8-3821-4e5f-bcfd-07df93720a4f",
+ "master_parent_type": "vservice",
+ "name": "tapbf16c3ab-56",
+ "netmask": "255.255.255.0",
+ "network": "a55ff1e8-3821-4e5f-bcfd-07df93720a4f",
+ "parent_id": "qdhcp-a55ff1e8-3821-4e5f-bcfd-07df93720a4f-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ },
+ {
+ "IP Address": "192.168.111.2",
+ "IPv6 Address": "fe80::f816:3eff:fe74:5/64",
+ "cidr": "192.168.111.0/24",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:74:00:05\ninet addr:192.168.111.2 Bcast:192.168.111.255 Mask:255.255.255.0\ninet6 addr: fe80::f816:3eff:fe74:5/64 Scope:Link\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:45 errors:0 dropped:28 overruns:0 frame:0\nTX packets:8 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:3734 (3.7 KB) TX bytes:648 (648.0 B)\n",
+ "host": "node-6.cisco.com",
+ "id": "tapee8e5dbb-03",
+ "mac_address": "fa:16:3e:74:00:05",
+ "master_parent_id": "qdhcp-6504fcf7-41d7-40bb-aeb1-6a7658c105fc",
+ "master_parent_type": "vservice",
+ "name": "tapee8e5dbb-03",
+ "netmask": "255.255.255.0",
+ "network": "6504fcf7-41d7-40bb-aeb1-6a7658c105fc",
+ "parent_id": "qdhcp-6504fcf7-41d7-40bb-aeb1-6a7658c105fc-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ },
+ {
+ "IP Address": "172.16.0.131",
+ "IPv6 Address": "2001:420:4482:24c1:f816:3eff:fe23:3ad7/64",
+ "cidr": "172.16.0.0/24",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:23:3a:d7\ninet addr:172.16.0.131 Bcast:172.16.0.255 Mask:255.255.255.0\ninet6 addr: fe80::f816:3eff:fe23:3ad7/64 Scope:Link\ninet6 addr: 2001:420:4482:24c1:f816:3eff:fe23:3ad7/64 Scope:Global\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:48172796 errors:0 dropped:1144801 overruns:0 frame:0\nTX packets:63 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:4220491940 (4.2 GB) TX bytes:3162 (3.1 KB)\n",
+ "host": "node-6.cisco.com",
+ "id": "qg-63489f34-af",
+ "mac_address": "fa:16:3e:23:3a:d7",
+ "master_parent_id": "qrouter-9ec3d703-0725-47e3-8f48-02b16236caf9",
+ "master_parent_type": "vservice",
+ "name": "qg-63489f34-af",
+ "netmask": "255.255.255.0",
+ "network": "c64adb76-ad9d-4605-9f5e-bd6dbe325cfb",
+ "parent_id": "qrouter-9ec3d703-0725-47e3-8f48-02b16236caf9-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ },
+ {
+ "IP Address": "172.16.13.5",
+ "IPv6 Address": "fe80::f816:3eff:fe1f:e174/64",
+ "cidr": "172.16.13.0/24",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:1f:e1:74\ninet addr:172.16.13.5 Bcast:172.16.13.255 Mask:255.255.255.0\ninet6 addr: fe80::f816:3eff:fe1f:e174/64 Scope:Link\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:25 errors:0 dropped:1 overruns:0 frame:0\nTX packets:10 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:2460 (2.4 KB) TX bytes:864 (864.0 B)\n",
+ "host": "node-6.cisco.com",
+ "id": "qr-18f029db-77",
+ "mac_address": "fa:16:3e:1f:e1:74",
+ "master_parent_id": "qrouter-9ec3d703-0725-47e3-8f48-02b16236caf9",
+ "master_parent_type": "vservice",
+ "name": "qr-18f029db-77",
+ "netmask": "255.255.255.0",
+ "network": "8673c48a-f137-4497-b25d-08b7b218fd17",
+ "parent_id": "qrouter-9ec3d703-0725-47e3-8f48-02b16236caf9-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ },
+ {
+ "IP Address": "172.16.2.1",
+ "IPv6 Address": "fe80::f816:3eff:fe2c:fb9b/64",
+ "cidr": "172.16.2.0/24",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:2c:fb:9b\ninet addr:172.16.2.1 Bcast:172.16.2.255 Mask:255.255.255.0\ninet6 addr: fe80::f816:3eff:fe2c:fb9b/64 Scope:Link\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:49 errors:0 dropped:3 overruns:0 frame:0\nTX packets:10 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:5825 (5.8 KB) TX bytes:864 (864.0 B)\n",
+ "host": "node-6.cisco.com",
+ "id": "qr-3ff411a2-54",
+ "mac_address": "fa:16:3e:2c:fb:9b",
+ "master_parent_id": "qrouter-9ec3d703-0725-47e3-8f48-02b16236caf9",
+ "master_parent_type": "vservice",
+ "name": "qr-3ff411a2-54",
+ "netmask": "255.255.255.0",
+ "network": "eb276a62-15a9-4616-a192-11466fdd147f",
+ "parent_id": "qrouter-9ec3d703-0725-47e3-8f48-02b16236caf9-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ },
+ {
+ "IP Address": "172.16.1.1",
+ "IPv6 Address": "fe80::f816:3eff:feee:9a46/64",
+ "cidr": "172.16.1.0/24",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:ee:9a:46\ninet addr:172.16.1.1 Bcast:172.16.1.255 Mask:255.255.255.0\ninet6 addr: fe80::f816:3eff:feee:9a46/64 Scope:Link\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:85 errors:0 dropped:14 overruns:0 frame:0\nTX packets:10 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:7402 (7.4 KB) TX bytes:864 (864.0 B)\n",
+ "host": "node-6.cisco.com",
+ "id": "qr-8733cc5d-b3",
+ "mac_address": "fa:16:3e:ee:9a:46",
+ "master_parent_id": "qrouter-9ec3d703-0725-47e3-8f48-02b16236caf9",
+ "master_parent_type": "vservice",
+ "name": "qr-8733cc5d-b3",
+ "netmask": "255.255.255.0",
+ "network": "a55ff1e8-3821-4e5f-bcfd-07df93720a4f",
+ "parent_id": "qrouter-9ec3d703-0725-47e3-8f48-02b16236caf9-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ },
+ {
+ "IP Address": "172.16.3.1",
+ "IPv6 Address": "fe80::f816:3eff:feba:5a3c/64",
+ "cidr": "172.16.3.0/24",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:ba:5a:3c\ninet addr:172.16.3.1 Bcast:172.16.3.255 Mask:255.255.255.0\ninet6 addr: fe80::f816:3eff:feba:5a3c/64 Scope:Link\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:3018 errors:0 dropped:15 overruns:0 frame:0\nTX packets:1766 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:295458 (295.4 KB) TX bytes:182470 (182.4 KB)\n",
+ "host": "node-6.cisco.com",
+ "id": "qr-bb9b8340-72",
+ "mac_address": "fa:16:3e:ba:5a:3c",
+ "master_parent_id": "qrouter-9ec3d703-0725-47e3-8f48-02b16236caf9",
+ "master_parent_type": "vservice",
+ "name": "qr-bb9b8340-72",
+ "netmask": "255.255.255.0",
+ "network": "7e59b726-d6f4-451a-a574-c67a920ff627",
+ "parent_id": "qrouter-9ec3d703-0725-47e3-8f48-02b16236caf9-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ },
+ {
+ "IP Address": "172.16.0.130",
+ "IPv6 Address": "fe80::f816:3eff:fecb:8d7b/64",
+ "cidr": "172.16.0.0/24",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:cb:8d:7b\ninet addr:172.16.0.130 Bcast:172.16.0.255 Mask:255.255.255.0\ninet6 addr: 2001:420:4482:24c1:f816:3eff:fecb:8d7b/64 Scope:Global\ninet6 addr: fe80::f816:3eff:fecb:8d7b/64 Scope:Link\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:48172955 errors:0 dropped:1144729 overruns:0 frame:0\nTX packets:59 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:4220505032 (4.2 GB) TX bytes:2958 (2.9 KB)\n",
+ "host": "node-6.cisco.com",
+ "id": "qg-57e65d34-3d",
+ "mac_address": "fa:16:3e:cb:8d:7b",
+ "master_parent_id": "qrouter-49ac7716-06da-49ed-b388-f8ba60e8a0e6",
+ "master_parent_type": "vservice",
+ "name": "qg-57e65d34-3d",
+ "netmask": "255.255.255.0",
+ "network": "c64adb76-ad9d-4605-9f5e-bd6dbe325cfb",
+ "parent_id": "qrouter-49ac7716-06da-49ed-b388-f8ba60e8a0e6-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ },
+ {
+ "IP Address": "192.168.111.1",
+ "IPv6 Address": "fe80::f816:3eff:fe0a:3cc/64",
+ "cidr": "192.168.111.0/24",
+ "data": "Link encap:Ethernet HWaddr fa:16:3e:0a:03:cc\ninet addr:192.168.111.1 Bcast:192.168.111.255 Mask:255.255.255.0\ninet6 addr: fe80::f816:3eff:fe0a:3cc/64 Scope:Link\nUP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1\nRX packets:79 errors:0 dropped:0 overruns:0 frame:0\nTX packets:10 errors:0 dropped:0 overruns:0 carrier:0\ncollisions:0 txqueuelen:0\nRX bytes:6475 (6.4 KB) TX bytes:864 (864.0 B)\n",
+ "host": "node-6.cisco.com",
+ "id": "qr-f7b44150-99",
+ "mac_address": "fa:16:3e:0a:03:cc",
+ "master_parent_id": "qrouter-49ac7716-06da-49ed-b388-f8ba60e8a0e6",
+ "master_parent_type": "vservice",
+ "name": "qr-f7b44150-99",
+ "netmask": "255.255.255.0",
+ "network": "6504fcf7-41d7-40bb-aeb1-6a7658c105fc",
+ "parent_id": "qrouter-49ac7716-06da-49ed-b388-f8ba60e8a0e6-vnics",
+ "parent_text": "vNICs",
+ "parent_type": "vnics_folder",
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"
+ }
+] \ No newline at end of file
diff --git a/app/test/fetch/config/__init__.py b/app/test/fetch/config/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/test/fetch/config/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/test/fetch/config/test_config.py b/app/test/fetch/config/test_config.py
new file mode 100644
index 0000000..176fd48
--- /dev/null
+++ b/app/test/fetch/config/test_config.py
@@ -0,0 +1,17 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# local config info for tests.
+
+
+MONGODB_CONFIG = 'your-mongo-config-path-here'
+
+ENV_CONFIG = 'your-env-name-here'
+
+COLLECTION_CONFIG = 'your-inventory-collection-name-here'
diff --git a/app/test/fetch/db_fetch/__init__.py b/app/test/fetch/db_fetch/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/test/fetch/db_fetch/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/test/fetch/db_fetch/mock_cursor.py b/app/test/fetch/db_fetch/mock_cursor.py
new file mode 100644
index 0000000..71efd3b
--- /dev/null
+++ b/app/test/fetch/db_fetch/mock_cursor.py
@@ -0,0 +1,25 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+class MockCursor:
+
+ def __init__(self, result):
+ self.result = result
+ self.current = 0
+
+ def __next__(self):
+ if self.current < len(self.result):
+            item = self.result[self.current]
+            self.current += 1
+            return item
+ else:
+ raise StopIteration
+
+ def __iter__(self):
+ return self
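+
+# Example usage (illustrative): tests substitute a MockCursor for a real
+# MySQL cursor so iteration over query results can be exercised offline,
+# e.g. DbAccess.conn.cursor = MagicMock(return_value=MockCursor(rows)),
+# after which iterating the cursor yields the rows passed to the constructor.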
diff --git a/app/test/fetch/db_fetch/test_data/__init__.py b/app/test/fetch/db_fetch/test_data/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_data/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/test/fetch/db_fetch/test_data/db_access.py b/app/test/fetch/db_fetch/test_data/db_access.py
new file mode 100644
index 0000000..a4ad548
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_data/db_access.py
@@ -0,0 +1,40 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+DB_CONFIG = {
+ "host": "10.56.20.239",
+ "name": "mysql",
+ "password": "102QreDdiD5sKcvNf9qbHrmr",
+ "port": 3307.0,
+ "user": "root",
+ "schema": "nova"
+ }
+
+QUERY_WITHOUT_ID = """
+ SELECT id, name
+ FROM nova.aggregates
+ WHERE deleted = 0
+ """
+
+QUERY_WITH_ID = """
+ SELECT CONCAT('aggregate-', a.name, '-', host) AS id, host AS name
+ FROM nova.aggregate_hosts ah
+ JOIN nova.aggregates a ON a.id = ah.aggregate_id
+ WHERE ah.deleted = 0 AND aggregate_id = %s
+ """
+
+ID = "2"
+OBJECT_TYPE = "host aggregate"
+
+OBJECTS_LIST = [
+ {
+ "id": 1,
+ "name": "osdna-agg"
+ }
+]
diff --git a/app/test/fetch/db_fetch/test_data/db_fetch_aggregate_hosts.py b/app/test/fetch/db_fetch/test_data/db_fetch_aggregate_hosts.py
new file mode 100644
index 0000000..2f1313a
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_data/db_fetch_aggregate_hosts.py
@@ -0,0 +1,34 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from bson.objectid import ObjectId
+
+
+AGGREGATE = {
+ "id": "1",
+}
+
+HOSTS = [
+ {
+ "id": "aggregate-osdna-agg-node-5.cisco.com",
+ "name": "node-5.cisco.com"
+ }
+]
+
+HOST_IN_INVENTORY = {
+ "_id": "595ac4b6d7c037efdb8918a7"
+}
+
+HOSTS_RESULT = [
+ {
+ "id": "aggregate-osdna-agg-node-5.cisco.com",
+ "name": "node-5.cisco.com",
+ "ref_id": ObjectId(HOST_IN_INVENTORY["_id"])
+ }
+]
diff --git a/app/test/fetch/db_fetch/test_data/db_fetch_aggregates.py b/app/test/fetch/db_fetch/test_data/db_fetch_aggregates.py
new file mode 100644
index 0000000..65182fa
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_data/db_fetch_aggregates.py
@@ -0,0 +1,17 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+REGION_ID = "RegionOne"
+
+OBJECTS_LIST = [
+ {
+ "id": 1,
+ "name": "calipso-agg"
+ }
+]
diff --git a/app/test/fetch/db_fetch/test_data/db_fetch_host_network_agents.py b/app/test/fetch/db_fetch/test_data/db_fetch_host_network_agents.py
new file mode 100644
index 0000000..6188ddf
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_data/db_fetch_host_network_agents.py
@@ -0,0 +1,65 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+CONFIG_WITH_MECHANISM_DRIVERS = {
+ 'mechanism_drivers': [
+ "OVS"
+ ]
+}
+
+CONFIG_WITHOUT_MECHANISM_DRIVERS = {
+ 'mechanism_drivers': [
+
+ ]
+}
+
+NETWORK_AGENT_FOLDER_ID = 'node-6.cisco.com-network_agents'
+
+NETWORK_AGENT = [
+ {
+ 'configurations': '{}',
+ 'id': '1764430c-c09e-4717-86fa-c04350b1fcbb',
+ 'binary': 'neutron-openvswitch-agent',
+ },
+ {
+ 'configurations': '{}',
+ 'id': '2c2ddfee-91f9-47da-bd65-aceecd998b7c',
+ 'binary': 'neutron-dhcp-agent',
+ }
+]
+
+NETWORK_AGENT_WITH_MECHANISM_DRIVERS_IN_CONFIG_RESULTS = [
+ {
+ 'configurations': {},
+ 'id': 'OVS-1764430c-c09e-4717-86fa-c04350b1fcbb',
+ 'binary': 'neutron-openvswitch-agent',
+ 'name': 'neutron-openvswitch-agent'
+ },
+ {
+ 'configurations': {},
+ 'id': 'OVS-2c2ddfee-91f9-47da-bd65-aceecd998b7c',
+ 'binary': 'neutron-dhcp-agent',
+ 'name': 'neutron-dhcp-agent'
+ }
+]
+
+NETWORK_AGENT_WITHOUT_MECHANISM_DRIVERS_IN_CONFIG_RESULTS = [
+ {
+ 'configurations': {},
+ 'id': 'network_agent-1764430c-c09e-4717-86fa-c04350b1fcbb',
+ 'binary': 'neutron-openvswitch-agent',
+ 'name': 'neutron-openvswitch-agent'
+ },
+ {
+ 'configurations': {},
+ 'id': 'network_agent-2c2ddfee-91f9-47da-bd65-aceecd998b7c',
+ 'binary': 'neutron-dhcp-agent',
+ 'name': 'neutron-dhcp-agent'
+ }
+]
diff --git a/app/test/fetch/db_fetch/test_data/db_fetch_instances.py b/app/test/fetch/db_fetch/test_data/db_fetch_instances.py
new file mode 100644
index 0000000..5ba6a74
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_data/db_fetch_instances.py
@@ -0,0 +1,91 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+INSTANCES_FROM_API = [
+ {
+ "host": "node-5.cisco.com",
+ "id": "6f29c867-9150-4533-8e19-70d749b172fa",
+ }
+]
+
+INSTANCES_FROM_DB = [
+ {
+ "host": "node-5.cisco.com",
+ "id": "6f29c867-9150-4533-8e19-70d749b172fa",
+ "network_info": "[{\"network\": {\"id\": \"7e59b726-d6f4-451a-a574-c67a920ff627\"}}]",
+ "project": "Calipso-project",
+ },
+ {
+ "host": "node-5.cisco.com",
+ "id": "bf0cb914-b316-486c-a4ce-f22deb453c52",
+ "network_info": "[{\"network\": {\"id\": \"7e59b726-d6f4-451a-a574-c67a920ff627\"}}]",
+ "project": "Calipso-project",
+ }
+]
+
+UPDATED_INSTANCES_DATA = [
+ {
+ "host": "node-5.cisco.com",
+ "id": "6f29c867-9150-4533-8e19-70d749b172fa",
+ "network": ["7e59b726-d6f4-451a-a574-c67a920ff627"],
+ "type": "instance",
+ "parent_type": "instances_folder",
+ "parent_id": "node-5.cisco.com-instances",
+ "in_project-Calipso-project": "1",
+ "network_info": [
+ {
+ "network": {
+ "id": "7e59b726-d6f4-451a-a574-c67a920ff627"
+ }
+ }
+ ]
+ }
+]
+
+INSTANCE_WITH_NETWORK = {
+ "host": "node-5.cisco.com",
+ "id": "6f29c867-9150-4533-8e19-70d749b172fa",
+ "network_info": "[{\"network\": {\"id\": \"7e59b726-d6f4-451a-a574-c67a920ff627\"}}]",
+ "project": "Calipso-project",
+}
+
+INSTANCE_WITH_NETWORK_RESULT = {
+ "host": "node-5.cisco.com",
+ "id": "6f29c867-9150-4533-8e19-70d749b172fa",
+ "network": ["7e59b726-d6f4-451a-a574-c67a920ff627"],
+ "type": "instance",
+ "parent_type": "instances_folder",
+ "parent_id": "node-5.cisco.com-instances",
+ "in_project-Calipso-project": "1",
+ "network_info": [
+ {
+ "network": {
+ "id": "7e59b726-d6f4-451a-a574-c67a920ff627"
+ }
+ }
+ ]
+}
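+
+# A sketch, under the assumption that the instances fetcher expands the
+# JSON-encoded network_info column the way these fixtures suggest; the
+# helper name is illustrative, the field names mirror the fixtures.
+import json
+
+
+def expand_instance(instance):
+    instance["network_info"] = json.loads(instance["network_info"])
+    instance["network"] = [n["network"]["id"] for n in instance["network_info"]]
+    instance["type"] = "instance"
+    instance["parent_type"] = "instances_folder"
+    instance["parent_id"] = instance["host"] + "-instances"
+    instance["in_project-" + instance.pop("project")] = "1"
+    return instance
+
+# expand_instance(dict(INSTANCE_WITH_NETWORK)) == INSTANCE_WITH_NETWORK_RESULT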
+
+INSTANCE_WITHOUT_NETWORK = {
+ "host": "node-5.cisco.com",
+ "id": "6f29c867-9150-4533-8e19-70d749b172fa",
+ "network_info": "[]",
+ "project": "Calipso-project",
+}
+
+INSTANCE_WITHOUT_NETWORK_RESULT = {
+ "host": "node-5.cisco.com",
+ "id": "6f29c867-9150-4533-8e19-70d749b172fa",
+ "network": [],
+ "type": "instance",
+ "parent_type": "instances_folder",
+ "parent_id": "node-5.cisco.com-instances",
+ "in_project-Calipso-project": "1",
+ "network_info": []
+}
diff --git a/app/test/fetch/db_fetch/test_data/db_fetch_oteps.py b/app/test/fetch/db_fetch/test_data/db_fetch_oteps.py
new file mode 100644
index 0000000..a5bc63d
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_data/db_fetch_oteps.py
@@ -0,0 +1,131 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+VEDGE_ID = "3858e121-d861-4348-9d64-a55fcd5bf60a"
+VEDGE = {
+ "configurations": {
+ "tunnel_types": [
+ "vxlan"
+ ],
+ "tunneling_ip": "192.168.2.1"
+ },
+ "host": "node-5.cisco.com",
+ "id": "3858e121-d861-4348-9d64-a55fcd5bf60a",
+ "tunnel_ports": {
+ "vxlan-c0a80203": {
+ },
+ "br-tun": {
+ }
+ },
+ "type": "vedge"
+}
+VEDGE_WITHOUT_CONFIGS = {
+
+}
+VEDGE_WITHOUT_TUNNEL_TYPES = {
+ "configuration": {
+ "tunnel_types": ""
+ }
+}
+NON_ICEHOUSE_CONFIGS = {
+ "distribution": "Mirantis-8.0"
+}
+ICEHOUSE_CONFIGS = {
+ "distribution": "Canonical-icehouse"
+}
+HOST = {
+ "host": "node-5.cisco.com",
+ "id": "node-5.cisco.com",
+ "ip_address": "192.168.0.4",
+ "name": "node-5.cisco.com"
+}
+OTEPS_WITHOUT_CONFIGURATIONS_IN_VEDGE_RESULTS = []
+OTEPS_WITHOUT_TUNNEL_TYPES_IN_VEDGE_RESULTS = []
+OTEPS_FOR_NON_ICEHOUSE_DISTRIBUTION_RESULTS = [
+ {
+ "host": "node-5.cisco.com",
+ "ip_address": "192.168.2.1",
+ "udp_port": 4789,
+ "id": "node-5.cisco.com-otep",
+ "name": "node-5.cisco.com-otep",
+ "overlay_type": "vxlan",
+ "ports": {
+ "vxlan-c0a80203": {
+ },
+ "br-tun": {
+ }
+ }
+ }
+]
+OTEPS_FOR_ICEHOUSE_DISTRIBUTION_RESULTS = [
+ {
+ "host": "node-5.cisco.com",
+ "ip_address": "192.168.0.4",
+ "id": "node-5.cisco.com-otep",
+ "name": "node-5.cisco.com-otep",
+ "overlay_type": "vxlan",
+ "ports": {
+ "vxlan-c0a80203": {
+ },
+ "br-tun": {
+ }
+ },
+ "udp_port": "67"
+ }
+]
+
+OTEPS = [
+ {
+ "host": "node-5.cisco.com",
+ "ip_address": "192.168.2.1",
+ "udp_port": 4789
+ }
+]
+
+OTEP_FOR_GETTING_VECONNECTOR = {
+ "host": "node-5.cisco.com",
+ "ip_address": "192.168.2.1",
+ "udp_port": 4789,
+ "id": "node-5.cisco.com-otep",
+ "name": "node-5.cisco.com-otep",
+ "overlay_type": "vxlan",
+ "ports": {
+ "vxlan-c0a80203": {
+ },
+ "br-tun": {
+ }
+ }
+}
+HOST_ID = "node-5.cisco.com"
+IFCONFIG_LINES = [
+ "br-mesh Link encap:Ethernet HWaddr 00:50:56:ac:28:9d ",
+ " inet addr:192.168.2.1 Bcast:192.168.2.255 Mask:255.255.255.0",
+ " inet6 addr: fe80::d4e1:8fff:fe33:ed6a/64 Scope:Link",
+ " UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1",
+ " RX packets:2273307 errors:0 dropped:0 overruns:0 frame:0",
+ " TX packets:2255930 errors:0 dropped:0 overruns:0 carrier:0",
+ " collisions:0 txqueuelen:0 ",
+ " RX bytes:578536155 (578.5 MB) TX bytes:598541522 (598.5 MB)",
+ ""
+]
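+# Sketch (illustrative helper name) of how the 'vconnector' value in
+# OTEP_WITH_CONNECTOR below can be located: find the interface in
+# IFCONFIG_LINES whose 'inet addr' matches the OTEP's tunneling IP.
+def find_vconnector(ifconfig_lines, ip_address):
+    name = None
+    for line in ifconfig_lines:
+        if line and not line.startswith(" "):
+            name = line.split()[0]
+        elif "inet addr:" + ip_address in line:
+            return name
+    return None
+
+# find_vconnector(IFCONFIG_LINES, "192.168.2.1") == "br-mesh"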
+OTEP_WITH_CONNECTOR = {
+ "host": "node-5.cisco.com",
+ "ip_address": "192.168.2.1",
+ "udp_port": 4789,
+ "id": "node-5.cisco.com-otep",
+ "name": "node-5.cisco.com-otep",
+ "overlay_type": "vxlan",
+ "ports": {
+ "vxlan-c0a80203": {
+ },
+ "br-tun": {
+ }
+ },
+ "vconnector": "br-mesh"
+}
diff --git a/app/test/fetch/db_fetch/test_data/db_fetch_vedges_ovs.py b/app/test/fetch/db_fetch/test_data/db_fetch_vedges_ovs.py
new file mode 100644
index 0000000..818704c
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_data/db_fetch_vedges_ovs.py
@@ -0,0 +1,168 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+VEDGES_FOLDER_ID = "node-6.cisco.com-vedges"
+
+OBJECTS_FROM_DB = [
+ {
+ "host": "node-6.cisco.com",
+ "agent_type": "Open vSwitch agent",
+ "configurations": '{"tunneling_ip": "192.168.2.3"}',
+ }
+]
+
+HOST = {
+ "host": "node-6.cisco.com",
+ "host_type": [
+ "Controller",
+ "Network"
+ ],
+ "id": "node-6.cisco.com",
+ "name": "node-6.cisco.com"
+}
+
+HOST_WITHOUT_REQUIRED_HOST_TYPES = {
+ "id": "node-6.cisco.com",
+ "host_type": []
+}
+
+PORTS = {
+ "ovs-system": {
+ "name": "ovs-system",
+ "id": "0",
+ "internal": True
+ },
+ "qr-bb9b8340-72": {
+ "name": "qr-bb9b8340-72",
+ "id": "1",
+ "internal": True,
+ "tag": "3"
+ },
+ "qr-8733cc5d-b3": {
+ "name": "qr-8733cc5d-b3",
+ "id": "2",
+ "internal": True,
+ "tag": "4"
+ }
+}
+
+TUNNEL_PORTS = {
+ "patch-int": {
+ "interface": "patch-int",
+ "name": "patch-int",
+ "options": {
+ "peer": "patch-tun"
+ },
+ "type": "patch"
+ }
+}
+
+GET_RESULTS = [
+ {
+ 'name': 'node-6.cisco.com-OVS',
+ 'host': 'node-6.cisco.com',
+ 'agent_type': 'Open vSwitch agent',
+ 'configurations': {"tunneling_ip": "192.168.2.3"},
+ 'ports': PORTS,
+ 'tunnel_ports': TUNNEL_PORTS
+ }
+]
+
+
+VSCTL_LINES = [
+ "3b12f08e-4e13-4976-8da5-23314b268805",
+ " Bridge br-int",
+ " fail_mode: secure",
+ " Port \"qr-bb9b8340-72\"",
+ " tag: 3",
+ " Interface \"qr-bb9b8340-72\"",
+ " type: internal",
+ " Port \"qr-8733cc5d-b3\"",
+ " tag: 4",
+ " Interface \"qr-8733cc5d-b3\"",
+ " type: internal",
+ " Bridge br-tun",
+ " fail_mode: secure",
+ " Port patch-int",
+ " Interface patch-int",
+ " type: patch",
+ " options: {peer=patch-tun}",
+]
+
+DPCTL_LINES = [
+ "system@ovs-system:",
+ "\tlookups: hit:14039304 missed:35687906 lost:0",
+ "\tflows: 4",
+ "\tmasks: hit:95173613 total:2 hit/pkt:1.91",
+ "\tport 0: ovs-system (internal)",
+ "\tport 1: qr-bb9b8340-72 (internal)",
+ "\tport 2: qr-8733cc5d-b3 (internal)"
+]
+
+DPCTL_RESULTS = {
+ "ovs-system": {
+ "name": "ovs-system",
+ "id": "0",
+ "internal": True
+ },
+ "qr-bb9b8340-72": {
+ "name": "qr-bb9b8340-72",
+ "id": "1",
+ "internal": True
+ },
+ "qr-8733cc5d-b3": {
+ "name": "qr-8733cc5d-b3",
+ "id": "2",
+ "internal": True
+ }
+}
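+
+# Sketch (assumption about the parsing step) of how DPCTL_RESULTS can be
+# built from the 'ovs-dpctl show' port lines in DPCTL_LINES above.
+def parse_dpctl_ports(lines):
+    ports = {}
+    for line in lines:
+        stripped = line.strip()
+        if not stripped.startswith("port "):
+            continue
+        port_id, rest = stripped[len("port "):].split(":", 1)
+        name = rest.split()[0]
+        ports[name] = {"name": name, "id": port_id,
+                       "internal": "(internal)" in rest}
+    return ports
+
+# parse_dpctl_ports(DPCTL_LINES) == DPCTL_RESULTS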
+
+FETCH__PORT_TAGS_INPUT = {
+ "ovs-system": {
+ "name": "ovs-system",
+ "id": "0",
+ "internal": True
+ },
+ "qr-bb9b8340-72": {
+ "name": "qr-bb9b8340-72",
+ "id": "1",
+ "internal": True
+ },
+ "qr-8733cc5d-b3": {
+ "name": "qr-8733cc5d-b3",
+ "id": "2",
+ "internal": True
+ }
+}
+
+FETCH_PORT_TAGS_RESULT = {
+ "ovs-system": {
+ "name": "ovs-system",
+ "id": "0",
+ "internal": True
+ },
+ "qr-bb9b8340-72": {
+ "name": "qr-bb9b8340-72",
+ "id": "1",
+ "internal": True,
+ "tag": "3"
+ },
+ "qr-8733cc5d-b3": {
+ "name": "qr-8733cc5d-b3",
+ "id": "2",
+ "internal": True,
+ "tag": "4"
+ }
+}
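+
+# Sketch (illustrative) of how the 'tag:' values in VSCTL_LINES end up on the
+# ports, turning the input port map above into FETCH_PORT_TAGS_RESULT.
+def fetch_port_tags(ports, vsctl_lines):
+    current_port = None
+    for line in vsctl_lines:
+        stripped = line.strip()
+        if stripped.startswith("Port "):
+            current_port = stripped[len("Port "):].strip('"')
+        elif stripped.startswith("tag:") and current_port in ports:
+            ports[current_port]["tag"] = stripped.split()[-1]
+    return ports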
+
+DOC_TO_GET_OVERLAY = {
+ "host": "node-6.cisco.com",
+ "agent_type": "Open vSwitch agent",
+ "configurations": {"tunneling_ip": "192.168.2.3"},
+}
diff --git a/app/test/fetch/db_fetch/test_data/db_fetch_vedges_vpp.py b/app/test/fetch/db_fetch/test_data/db_fetch_vedges_vpp.py
new file mode 100644
index 0000000..24265ae
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_data/db_fetch_vedges_vpp.py
@@ -0,0 +1,89 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+VEDGE_FOLDER_ID = "ubuntu0-vedges"
+
+HOST = {
+ "host_type": [
+ "Controller",
+ "Compute",
+ "Network"
+ ],
+ "id": "ubuntu0",
+}
+
+HOST_WITHOUT_REQUIRED_HOST_TYPE = {
+ "host_type": [
+
+ ]
+}
+
+VERSION = [
+ "vpp v16.09-rc0~157-g203c632 built by localadmin on ubuntu0 at Sun Jun 26 16:35:15 PDT 2016\n"
+]
+
+INTERFACES = [
+ " Name Idx State Counter Count ",
+ "TenGigabitEthernetc/0/0 5 up rx packets 502022",
+ " rx bytes 663436206",
+ " tx packets 81404",
+ " tx bytes 6366378",
+ " drops 1414",
+ " punts 1",
+ " rx-miss 64525",
+ "VirtualEthernet0/0/0 7 up tx packets 31496",
+ " tx bytes 2743185",
+ "local0 0 down ",
+ "pg/stream-0 1 down ",
+]
+
+PORTS = {
+ "TenGigabitEthernetc/0/0": {
+ "id": "5",
+ "name": "TenGigabitEthernetc/0/0",
+ "state": "up"
+ },
+ "VirtualEthernet0/0/0": {
+ "id": "7",
+ "name": "VirtualEthernet0/0/0",
+ "state": "up"
+ },
+ "local0": {
+ "id": "0",
+ "name": "local0",
+ "state": "down"
+ },
+ "pg/stream-0": {
+ "id": "1",
+ "name": "pg/stream-0",
+ "state": "down"
+ }
+}
+
+
+VEDGE_RESULTS = [
+ {
+ "host": "ubuntu0",
+ "id": "ubuntu0-VPP",
+ "name": "VPP-ubuntu0",
+ "agent_type": "VPP",
+ "binary": "vpp v16.09-rc0~157-g203c632",
+ "ports": PORTS
+ }
+]
+
+VEDGE_RESULTS_WITHOUT_BINARY = [
+ {
+ "host": "ubuntu0",
+ "id": "ubuntu0-VPP",
+ "name": "VPP-ubuntu0",
+ "agent_type": "VPP",
+ "ports": PORTS
+ }
+]
diff --git a/app/test/fetch/db_fetch/test_db_access.py b/app/test/fetch/db_fetch/test_db_access.py
new file mode 100644
index 0000000..4ef3e74
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_db_access.py
@@ -0,0 +1,108 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_access import DbAccess
+from test.fetch.test_fetch import TestFetch
+from test.fetch.db_fetch.test_data.db_access import *
+from unittest.mock import MagicMock, patch
+from test.fetch.db_fetch.mock_cursor import MockCursor
+
+
+class TestDbAccess(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = DbAccess()
+
+ @patch("mysql.connector.connect")
+ def test_db_connect(self, db_connect):
+ DbAccess.conn = None
+ db_conn = MagicMock()
+ db_conn.ping = MagicMock()
+ db_connect.return_value = db_conn
+
+ self.fetcher.db_connect(DB_CONFIG['host'], DB_CONFIG['port'],
+ DB_CONFIG['user'], DB_CONFIG['password'],
+ DB_CONFIG['schema'])
+
+        self.assertEqual(True, db_connect.called,
+                         "connect method hasn't been called")
+ db_conn.ping.assert_called_once_with(True)
+
+ def test_connect_to_db(self):
+ DbAccess.conn = None
+ self.fetcher.db_connect = MagicMock()
+ self.fetcher.connect_to_db()
+
+ self.assertEqual(True, self.fetcher.db_connect.called)
+
+ def test_connect_to_db_with_force(self):
+ DbAccess.conn = MagicMock()
+ self.fetcher.db_connect = MagicMock()
+ self.fetcher.connect_to_db(force=True)
+
+ self.assertEqual(True, self.fetcher.db_connect.called)
+
+ def test_connect_to_db_without_force(self):
+ DbAccess.conn = MagicMock()
+ self.fetcher.db_connect = MagicMock()
+ self.fetcher.connect_to_db()
+
+ self.assertEqual(False, self.fetcher.db_connect.called)
+
+ def test_get_objects_list_for_id_with_id(self):
+ # mock the db cursor
+ mock_cursor = MockCursor(OBJECTS_LIST)
+ mock_cursor.execute = MagicMock()
+
+ self.fetcher.connect_to_db = MagicMock()
+ DbAccess.conn.cursor = MagicMock(return_value=mock_cursor)
+
+ result = self.fetcher.get_objects_list_for_id(QUERY_WITH_ID, OBJECT_TYPE, ID)
+
+ mock_cursor.execute.assert_called_once_with(QUERY_WITH_ID, [ID])
+ self.assertEqual(result, OBJECTS_LIST, "Can't get objects list")
+
+ def test_get_objects_list_for_id_without_id(self):
+ mock_cursor = MockCursor(OBJECTS_LIST)
+
+ self.fetcher.connect_to_db = MagicMock()
+ DbAccess.conn.cursor = MagicMock(return_value=mock_cursor)
+ mock_cursor.execute = MagicMock()
+
+ result = self.fetcher.get_objects_list_for_id(QUERY_WITHOUT_ID, OBJECT_TYPE, None)
+
+ mock_cursor.execute.assert_called_once_with(QUERY_WITHOUT_ID)
+ self.assertEqual(result, OBJECTS_LIST, "Can't get objects list")
+
+ def test_get_objects_list_for_id_with_id_with_exception(self):
+ mock_cursor = MockCursor(OBJECTS_LIST)
+ self.fetcher.connect_to_db = MagicMock()
+ # mock exception
+ DbAccess.conn.cursor = MagicMock(return_value=mock_cursor)
+ mock_cursor.execute = MagicMock(side_effect=[AttributeError, ""])
+
+ result = self.fetcher.get_objects_list_for_id(QUERY_WITH_ID, OBJECT_TYPE, ID)
+
+ self.assertEqual(mock_cursor.execute.call_count, 2, "Can't invoke execute method " +
+ "twice when error occurs")
+ self.assertEqual(result, OBJECTS_LIST, "Can't get objects list")
+
+ def test_get_objects_list_for_id_without_id_with_exception(self):
+ mock_cursor = MockCursor(OBJECTS_LIST)
+ self.fetcher.connect_to_db = MagicMock()
+ DbAccess.conn.cursor = MagicMock(return_value=mock_cursor)
+ mock_cursor.execute = MagicMock(side_effect=[AttributeError, ""])
+
+ result = self.fetcher.get_objects_list_for_id(QUERY_WITHOUT_ID,
+ OBJECT_TYPE,
+ None)
+
+ self.assertEqual(mock_cursor.execute.call_count, 2, "Can't invoke execute method " +
+ "twice when error occurs")
+ self.assertEqual(result, OBJECTS_LIST, "Can't get objects list")
diff --git a/app/test/fetch/db_fetch/test_db_fetch_aggregate_hosts.py b/app/test/fetch/db_fetch/test_db_fetch_aggregate_hosts.py
new file mode 100644
index 0000000..2066577
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_db_fetch_aggregate_hosts.py
@@ -0,0 +1,60 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_fetch_aggregate_hosts import DbFetchAggregateHosts
+from test.fetch.test_fetch import TestFetch
+from test.fetch.db_fetch.test_data.db_fetch_aggregate_hosts import *
+from unittest.mock import MagicMock
+
+
+class TestDbFetchAggregateHosts(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = DbFetchAggregateHosts()
+
+ def check_get_results_is_correct(self,
+ objects_list,
+ host_in_inventory,
+ expected_result,
+ err_msg):
+ self.fetcher.get_objects_list_for_id = MagicMock(return_value=objects_list)
+ self.inv.get_by_id = MagicMock(return_value=host_in_inventory)
+ result = self.fetcher.get(AGGREGATE["id"])
+
+ self.assertEqual(result, expected_result, err_msg)
+
+ def test_get(self):
+ test_cases = [
+ {
+ "objects_list": HOSTS,
+ "host_in_inventory": HOST_IN_INVENTORY,
+ "expected_result": HOSTS_RESULT,
+ "err_msg": "Can't get correct hosts info"
+ },
+ {
+ "objects_list": [],
+ "host_in_inventory": None,
+ "expected_result": [],
+ "err_msg": "Can't get [] when the "
+ "returned objects list is empty"
+ },
+ {
+ "objects_list": HOSTS,
+ "host_in_inventory": [],
+ "expected_result": HOSTS,
+ "err_msg": "Can't get correct hosts info "
+ "when the host doesn't exist in the inventory"
+ }
+ ]
+ for test_case in test_cases:
+ self.check_get_results_is_correct(test_case["objects_list"],
+ test_case["host_in_inventory"],
+ test_case["expected_result"],
+ test_case["err_msg"])
diff --git a/app/test/fetch/db_fetch/test_db_fetch_aggregates.py b/app/test/fetch/db_fetch/test_db_fetch_aggregates.py
new file mode 100644
index 0000000..12693b7
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_db_fetch_aggregates.py
@@ -0,0 +1,26 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_fetch_aggregates import DbFetchAggregates
+from test.fetch.test_fetch import TestFetch
+from test.fetch.db_fetch.test_data.db_fetch_aggregates import *
+from unittest.mock import MagicMock
+
+
+class TestDbFetchAggregates(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = DbFetchAggregates()
+
+ def test_get(self):
+ self.fetcher.get_objects_list = MagicMock(return_value=OBJECTS_LIST)
+ result = self.fetcher.get(REGION_ID)
+ self.assertEqual(result, OBJECTS_LIST, "Can't get correct " +
+ "aggregates info")
diff --git a/app/test/fetch/db_fetch/test_db_fetch_instances.py b/app/test/fetch/db_fetch/test_db_fetch_instances.py
new file mode 100644
index 0000000..a1207a1
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_db_fetch_instances.py
@@ -0,0 +1,37 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_fetch_instances import DbFetchInstances
+from test.fetch.test_fetch import TestFetch
+from unittest.mock import MagicMock
+from test.fetch.db_fetch.test_data.db_fetch_instances import *
+
+
+class TestDbFetchInstances(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = DbFetchInstances()
+
+ def test_get(self):
+        self.fetcher.get_objects_list = \
+            MagicMock(return_value=INSTANCES_FROM_DB)
+ self.fetcher.get_instance_data(INSTANCES_FROM_API)
+
+ self.assertEqual(INSTANCES_FROM_API, UPDATED_INSTANCES_DATA)
+
+ def test_build_instance_details_with_network(self):
+ self.fetcher.build_instance_details(INSTANCE_WITH_NETWORK)
+ self.assertEqual(INSTANCE_WITH_NETWORK,
+ INSTANCE_WITH_NETWORK_RESULT)
+
+ def test_build_instance_details_without_network(self):
+ self.fetcher.build_instance_details(INSTANCE_WITHOUT_NETWORK)
+ self.assertEqual(INSTANCE_WITHOUT_NETWORK,
+ INSTANCE_WITHOUT_NETWORK_RESULT)
diff --git a/app/test/fetch/db_fetch/test_db_fetch_oteps.py b/app/test/fetch/db_fetch/test_db_fetch_oteps.py
new file mode 100644
index 0000000..905f55a
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_db_fetch_oteps.py
@@ -0,0 +1,92 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import copy
+
+from discover.fetchers.db.db_fetch_oteps import DbFetchOteps
+from test.fetch.test_fetch import TestFetch
+from test.fetch.db_fetch.test_data.db_fetch_oteps import *
+from unittest.mock import MagicMock
+
+
+class TestDbFetchOteps(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = DbFetchOteps()
+ self.fetcher.set_env(self.env)
+
+ def check_get_oteps_results(self, vedge,
+ config,
+ host,
+ oteps_from_db,
+ expected_results,
+ err_msg):
+ original_get_vconnector = self.fetcher.get_vconnector
+ self.fetcher.get_vconnector = MagicMock()
+ self.fetcher.inv.get_by_id = MagicMock(side_effect=[vedge, host])
+ self.fetcher.config.get_env_config = MagicMock(return_value=config)
+ self.fetcher.get_objects_list_for_id = MagicMock(return_value=oteps_from_db)
+ results = self.fetcher.get(VEDGE_ID)
+ self.assertEqual(results, expected_results, err_msg)
+ self.fetcher.get_vconnector = original_get_vconnector
+
+ def test_get(self):
+ test_cases = [
+ {
+ "vedge": VEDGE_WITHOUT_CONFIGS,
+ "config": NON_ICEHOUSE_CONFIGS,
+ "host": None,
+ "oteps_from_db": None,
+ "expected_results": [],
+ "err_msg": "Can't get [] when the vedge " +
+ "doesn't contains configurations"
+ },
+ {
+ "vedge": VEDGE_WITHOUT_TUNNEL_TYPES,
+ "config": NON_ICEHOUSE_CONFIGS,
+ "host": None,
+ "oteps_from_db": None,
+ "expected_results": [],
+ "err_msg": "Can't get [] when the vedge configurations " +
+ "doesn't contain tunnel_types"
+ },
+ {
+ "vedge": VEDGE,
+ "config": ICEHOUSE_CONFIGS,
+ "host": HOST,
+ "oteps_from_db": None,
+ "expected_results": OTEPS_FOR_ICEHOUSE_DISTRIBUTION_RESULTS,
+ "err_msg": "Can't get correct oteps result " +
+ "when the distribution is icehouse"
+ },
+ {
+ "vedge": VEDGE,
+ "config": NON_ICEHOUSE_CONFIGS,
+ "host": None,
+ "oteps_from_db": OTEPS,
+ "expected_results": OTEPS_FOR_NON_ICEHOUSE_DISTRIBUTION_RESULTS,
+ "err_msg": "Can't get correct oteps result " +
+ "when the distribution is not icehouse"
+ }
+ ]
+ for test_case in test_cases:
+ self.check_get_oteps_results(test_case["vedge"],
+ test_case["config"],
+ test_case["host"],
+ test_case["oteps_from_db"],
+ test_case["expected_results"],
+ test_case["err_msg"])
+
+ def test_get_vconnectors(self):
+ self.fetcher.run_fetch_lines = MagicMock(return_value=IFCONFIG_LINES)
+ self.fetcher.get_vconnector(OTEP_FOR_GETTING_VECONNECTOR,
+ HOST_ID, VEDGE)
+ self.assertEqual(OTEP_FOR_GETTING_VECONNECTOR, OTEP_WITH_CONNECTOR,
+ "Can't get vconnector from the config lines for otep")
diff --git a/app/test/fetch/db_fetch/test_db_fetch_vedges_ovs.py b/app/test/fetch/db_fetch/test_db_fetch_vedges_ovs.py
new file mode 100644
index 0000000..b08aebd
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_db_fetch_vedges_ovs.py
@@ -0,0 +1,109 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_fetch_vedges_ovs import DbFetchVedgesOvs
+from test.fetch.test_fetch import TestFetch
+from test.fetch.db_fetch.test_data.db_fetch_vedges_ovs import *
+from unittest.mock import MagicMock
+
+
+class TestDbFetchVedgesOvs(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = DbFetchVedgesOvs()
+ self.fetcher.set_env(self.env)
+
+ def check_get_result(self,
+ objects_from_db, host,
+ vsctl_lines, ports, tunnel_ports,
+ expected_result, err_msg):
+ # store original methods
+ original_get_objects_list_by_id = self.fetcher.get_objects_list_for_id
+ original_get_by_id = self.fetcher.inv.get_by_id
+ original_run_fetch_lines = self.fetcher.run_fetch_lines
+ original_fetch_ports = self.fetcher.fetch_ports
+ original_get_overlay_tunnels = self.fetcher.get_overlay_tunnels
+
+ self.fetcher.get_objects_list_for_id = MagicMock(return_value=objects_from_db)
+ self.fetcher.inv.get_by_id = MagicMock(return_value=host)
+ self.fetcher.run_fetch_lines = MagicMock(return_value=vsctl_lines)
+ self.fetcher.fetch_ports = MagicMock(return_value=ports)
+ self.fetcher.get_overlay_tunnels = MagicMock(return_value=tunnel_ports)
+
+ results = self.fetcher.get(VEDGES_FOLDER_ID)
+ self.assertEqual(results, expected_result, err_msg)
+
+ # restore methods
+ self.fetcher.get_objects_list_for_id = original_get_objects_list_by_id
+ self.fetcher.inv.get_by_id = original_get_by_id
+ self.fetcher.run_fetch_lines = original_run_fetch_lines
+ self.fetcher.fetch_ports = original_fetch_ports
+ self.fetcher.get_overlay_tunnels = original_get_overlay_tunnels
+
+ def test_get(self):
+ test_cases = [
+ {
+ "objects_from_db": OBJECTS_FROM_DB,
+ "host": HOST,
+ "vsctl_lines": "",
+ "ports": PORTS,
+ "tunnel_ports": TUNNEL_PORTS,
+ "expected_result": GET_RESULTS,
+ "err_msg": "Can't get correct vedges"
+ },
+ {
+ "objects_from_db": OBJECTS_FROM_DB,
+ "host": [],
+ "vsctl_lines": "",
+ "ports": {},
+ "tunnel_ports": [],
+ "expected_result": [],
+ "err_msg": "Can't get [] when the host " +
+ "doesn't exist"
+ },
+ {
+ "objects_from_db": OBJECTS_FROM_DB,
+ "host": HOST_WITHOUT_REQUIRED_HOST_TYPES,
+ "vsctl_lines": "",
+ "ports": {},
+ "tunnel_ports": [],
+ "expected_result": [],
+ "err_msg": "Can't get [] when the host " +
+ "doesn't have required host types"
+ }
+ ]
+ for test_case in test_cases:
+ self.check_get_result(test_case["objects_from_db"],
+ test_case["host"],
+ test_case["vsctl_lines"],
+ test_case["ports"],
+ test_case["tunnel_ports"],
+ test_case["expected_result"],
+ test_case["err_msg"])
+
+ def test_fetch_ports_from_dpctl(self):
+ original_run_fetch_lines = self.fetcher.run_fetch_lines
+ self.fetcher.run_fetch_lines = MagicMock(return_value=DPCTL_LINES)
+
+ results = self.fetcher.fetch_ports_from_dpctl(HOST['id'])
+ self.fetcher.run_fetch_lines = original_run_fetch_lines
+ self.assertEqual(results, DPCTL_RESULTS,
+ "Can' t get correct ports info from dpctl lines")
+
+ def test_fetch_port_tags_from_vsctl(self):
+ ports = self.fetcher.fetch_port_tags_from_vsctl(VSCTL_LINES,
+ FETCH__PORT_TAGS_INPUT)
+ self.assertEqual(ports, FETCH_PORT_TAGS_RESULT,
+ "Can't fetch tag from vsctl")
+
+ def test_get_overlay_tunnels(self):
+ results = self.fetcher.get_overlay_tunnels(DOC_TO_GET_OVERLAY,
+ VSCTL_LINES)
+ self.assertEqual(results, TUNNEL_PORTS)
diff --git a/app/test/fetch/db_fetch/test_db_fetch_vedges_vpp.py b/app/test/fetch/db_fetch/test_db_fetch_vedges_vpp.py
new file mode 100644
index 0000000..9e6f497
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_db_fetch_vedges_vpp.py
@@ -0,0 +1,82 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_fetch_vedges_vpp import DbFetchVedgesVpp
+from test.fetch.test_fetch import TestFetch
+from test.fetch.db_fetch.test_data.db_fetch_vedges_vpp import *
+from unittest.mock import MagicMock
+
+
+class TestDbFetchVedgesVpp(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = DbFetchVedgesVpp()
+ self.fetcher.set_env(self.env)
+
+ def check_get_results(self, version,
+ interfaces, host,
+ expected_results, err_msg):
+ original_run_fetch_lines = self.fetcher.run_fetch_lines
+ original_get_by_id = self.fetcher.inv.get_by_id
+
+ self.fetcher.run_fetch_lines = MagicMock(side_effect=[version, interfaces])
+ self.fetcher.inv.get_by_id = MagicMock(return_value=host)
+
+ vedges = self.fetcher.get(VEDGE_FOLDER_ID)
+ self.assertEqual(vedges, expected_results, err_msg)
+
+ self.fetcher.run_fetch_lines = original_run_fetch_lines
+ self.fetcher.inv.get_by_id = original_get_by_id
+
+ def test_get(self):
+ test_cases = [
+ {
+ "version": VERSION,
+ "interfaces": INTERFACES,
+ "host": HOST,
+ "expected_results": VEDGE_RESULTS,
+ "err_msg": "Can' get correct vedges"
+ },
+ {
+ "version": [],
+ "interfaces": INTERFACES,
+ "host": HOST,
+ "expected_results": VEDGE_RESULTS_WITHOUT_BINARY,
+ "err_msg": "Can' get correct vedges when " +
+ "it can't get version info host"
+ },
+ {
+ "version": VERSION,
+ "interfaces": INTERFACES,
+ "host": [],
+ "expected_results": [],
+ "err_msg": "Can't get [] when the host of the " +
+ "vedge doesn't exist in db"
+ },
+ {
+ "version": VERSION,
+ "interfaces": INTERFACES,
+ "host": HOST_WITHOUT_REQUIRED_HOST_TYPE,
+ "expected_results": [],
+ "err_msg": "Can't get [] when the host of the " +
+ "vedge doesn't contains required host types"
+ }
+ ]
+
+ for test_case in test_cases:
+ self.check_get_results(test_case["version"],
+ test_case["interfaces"],
+ test_case["host"],
+ test_case["expected_results"],
+ test_case["err_msg"])
+
+ def test_fetch_ports(self):
+ ports = self.fetcher.fetch_ports(INTERFACES)
+        self.assertEqual(ports, PORTS, "Can't get the correct ports info")
\ No newline at end of file
diff --git a/app/test/fetch/db_fetch/test_fetch_host_network_agents.py b/app/test/fetch/db_fetch/test_fetch_host_network_agents.py
new file mode 100644
index 0000000..fd68a56
--- /dev/null
+++ b/app/test/fetch/db_fetch/test_fetch_host_network_agents.py
@@ -0,0 +1,66 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import copy
+
+from discover.fetchers.db.db_fetch_host_network_agents import DbFetchHostNetworkAgents
+from test.fetch.test_fetch import TestFetch
+from test.fetch.db_fetch.test_data.db_fetch_host_network_agents import *
+from unittest.mock import MagicMock
+
+
+class TestFetchHostNetworkAgents(TestFetch):
+
+ def setUp(self):
+ self.configure_environment()
+ self.fetcher = DbFetchHostNetworkAgents()
+
+ def check_get_result(self,
+ config,
+ network_agent_res,
+ expected_result,
+ err_msg):
+ self.fetcher.env_config = config
+        self.fetcher.get_objects_list_for_id = \
+ MagicMock(return_value=network_agent_res)
+ result = self.fetcher.get(NETWORK_AGENT_FOLDER_ID)
+ self.assertEqual(result, expected_result, err_msg)
+
+ def test_get(self):
+ test_cases = [
+ {
+ 'config': CONFIG_WITH_MECHANISM_DRIVERS,
+ 'network_agent_res': copy.deepcopy(NETWORK_AGENT),
+ 'expected_result':
+ NETWORK_AGENT_WITH_MECHANISM_DRIVERS_IN_CONFIG_RESULTS,
+ 'err_msg': "Can't get correct result when the " +
+ "mechanism drivers exists in the config"
+ },
+ {
+ 'config': CONFIG_WITHOUT_MECHANISM_DRIVERS,
+ 'network_agent_res': copy.deepcopy(NETWORK_AGENT),
+ 'expected_result':
+ NETWORK_AGENT_WITHOUT_MECHANISM_DRIVERS_IN_CONFIG_RESULTS,
+ 'err_msg': "Can't get correct result when the " +
+ "mechanism drivers doesn't exist in the config"
+ },
+ {
+ 'config': CONFIG_WITH_MECHANISM_DRIVERS,
+ 'network_agent_res': [],
+ 'expected_result': [],
+ 'err_msg': "Can't get [] when the network agent result " +
+ "is empty"
+ }
+ ]
+
+ for test_case in test_cases:
+ self.check_get_result(test_case['config'],
+ test_case['network_agent_res'],
+ test_case['expected_result'],
+ test_case['err_msg'])
diff --git a/app/test/fetch/test_fetch.py b/app/test/fetch/test_fetch.py
new file mode 100644
index 0000000..b9fd3f1
--- /dev/null
+++ b/app/test/fetch/test_fetch.py
@@ -0,0 +1,46 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import unittest
+
+from discover.configuration import Configuration
+from discover.fetchers.db.db_access import DbAccess
+from test.fetch.config.test_config import MONGODB_CONFIG, ENV_CONFIG, COLLECTION_CONFIG
+from test.fetch.api_fetch.test_data.regions import REGIONS
+from test.fetch.api_fetch.test_data.configurations import CONFIGURATIONS
+from unittest.mock import MagicMock
+from utils.inventory_mgr import InventoryMgr
+from utils.mongo_access import MongoAccess
+from utils.ssh_connection import SshConnection
+from utils.ssh_conn import SshConn
+
+
+class TestFetch(unittest.TestCase):
+
+ def configure_environment(self):
+ self.env = ENV_CONFIG
+ self.inventory_collection = COLLECTION_CONFIG
+ # mock the Mongo Access
+ MongoAccess.mongo_connect = MagicMock()
+ MongoAccess.db = MagicMock()
+
+ self.conf = Configuration()
+ self.conf.use_env = MagicMock()
+ self.conf.environment = CONFIGURATIONS
+ self.conf.configuration = CONFIGURATIONS["configuration"]
+
+ self.inv = InventoryMgr()
+ self.inv.set_collections(self.inventory_collection)
+ DbAccess.conn = MagicMock()
+ SshConnection.connect = MagicMock()
+ SshConnection.check_definitions = MagicMock()
+ SshConn.check_definitions = MagicMock()
+
+ def set_regions_for_fetcher(self, fetcher):
+ fetcher.regions = REGIONS
diff --git a/app/test/scan/__init__.py b/app/test/scan/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/test/scan/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/test/scan/config/__init__.py b/app/test/scan/config/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/test/scan/config/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/test/scan/config/test_config.py b/app/test/scan/config/test_config.py
new file mode 100644
index 0000000..176fd48
--- /dev/null
+++ b/app/test/scan/config/test_config.py
@@ -0,0 +1,17 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# Local config info for tests.
+
+
+MONGODB_CONFIG = 'your-mongo-config-path-here'
+
+ENV_CONFIG = 'your-env-name-here'
+
+COLLECTION_CONFIG = 'your-inventory-collection-name-here'
diff --git a/app/test/scan/main.py b/app/test/scan/main.py
new file mode 100644
index 0000000..fb8c4b5
--- /dev/null
+++ b/app/test/scan/main.py
@@ -0,0 +1,17 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import unittest
+
+from test.scan.test_scanner import TestScanner
+from test.scan.test_scan_controller import TestScanController
+from test.scan.test_scan_metadata_parser import TestScanMetadataParser
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/app/test/scan/mock_module.py b/app/test/scan/mock_module.py
new file mode 100644
index 0000000..e7aeb13
--- /dev/null
+++ b/app/test/scan/mock_module.py
@@ -0,0 +1,37 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
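+# Minimal stand-in for the real ScanEnvironment scanner: it counts how many
+# times run_scan/scan_links/scan_cliques are called and returns a preset
+# result from run_scan, so tests can assert on the counts.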
+class ScanEnvironment:
+
+ run_scan_count = 0
+ scan_links_count = 0
+ scan_cliques_count = 0
+ result = []
+
+ def set_result(self, result):
+ self.result = result
+
+ def run_scan(self, *args):
+ ScanEnvironment.run_scan_count += 1
+ return self.result
+
+ def scan_links(self, *args):
+ ScanEnvironment.scan_links_count += 1
+
+ def scan_cliques(self, *args):
+ ScanEnvironment.scan_cliques_count += 1
+
+ def set_env(self, env):
+ pass
+
+ @classmethod
+ def reset_counts(cls):
+ cls.run_scan_count = 0
+ cls.scan_cliques_count = 0
+        cls.scan_links_count = 0
\ No newline at end of file
diff --git a/app/test/scan/test_data/__init__.py b/app/test/scan/test_data/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/test/scan/test_data/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/test/scan/test_data/configurations.py b/app/test/scan/test_data/configurations.py
new file mode 100644
index 0000000..da68dd1
--- /dev/null
+++ b/app/test/scan/test_data/configurations.py
@@ -0,0 +1,69 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+CONFIGURATIONS = {
+ "app_path": "/home/scan/calipso_prod/app/",
+ "scanners_file": "/home/yarony/osdna_dev/app/discover/scanners.json",
+ "configuration": [
+ {
+ "mock": "True",
+ "host": "10.56.20.239",
+ "name": "mysql",
+ "password": "102QreDdiD5sKcvNf9qbHrmr",
+ "port": 3307.0,
+ "user": "root",
+ "schema": "nova"
+ },
+ {
+ "name": "OpenStack",
+ "host": "10.56.20.239",
+ "admin_token": "38MUh19YWcgQQUlk2VEFQ7Ec",
+ "port": "5000",
+ "user": "admin",
+ "pwd": "admin"
+ },
+ {
+ "host": "10.56.20.239",
+ "key": "/Users/ngrandhi/.ssh/id_rsa",
+ "name": "CLI",
+ "pwd": "",
+ "user": "root"
+ },
+ {
+ "name": "AMQP",
+ "host": "10.56.20.239",
+ "port": "5673",
+ "user": "nova",
+ "password": "NF2nSv3SisooxPkCTr8fbfOa"
+ },
+ {
+ "config_folder": "/tmp/sensu_config",
+ "provision": "Deploy",
+ "env_type": "development",
+ "name": "Monitoring",
+ "rabbitmq_port": "5672",
+ "rabbitmq_pass": "osdna",
+ "rabbitmq_user": "sensu",
+ "ssh_port": "20022",
+ "ssh_user": "scan",
+ "ssh_password": "scan",
+ "server_ip": "korlev-osdna-staging1.cisco.com",
+ "server_name": "osdna-sensu",
+ "type": "Sensu"
+ }
+ ],
+ "distribution": "Mirantis-8.0",
+ "last_scanned:": "5/8/16",
+ "name": "Mirantis-Liberty-Nvn",
+ "mechanism_drivers": [
+ "OVS"
+ ],
+ "operational": "yes",
+ "type": "environment"
+}
diff --git a/app/test/scan/test_data/metadata.py b/app/test/scan/test_data/metadata.py
new file mode 100644
index 0000000..ed47c80
--- /dev/null
+++ b/app/test/scan/test_data/metadata.py
@@ -0,0 +1,318 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
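+# Scanner metadata fixtures: valid definitions plus deliberately malformed
+# variants used to exercise the metadata parser's validation paths.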
+METADATA_EMPTY = {}
+
+METADATA_SCANNERS_MISSING = {"scanners_package": "discover.fetchers"}
+
+METADATA_NO_PACKAGE = {
+ "scanners": {}
+}
+
+METADATA_NO_SCANNERS = {
+ "scanners_package": "discover.fetchers"
+}
+
+METADATA_ZERO_SCANNERS = {
+ "scanners_package": "discover.fetchers",
+ "scanners": {}
+}
+
+METADATA_SIMPLE_SCANNER = {
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanAggregate": [
+ {
+ "type": "host_ref",
+ "fetcher": "DbFetchAggregateHosts"
+ }
+ ]
+ }
+}
+
+METADATA_SCANNER_UNKNOWN_ATTRIBUTE = {
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanAggregate": [
+ {
+ "xyz": "123",
+ "type": "host_ref",
+ "fetcher": "DbFetchAggregateHosts"
+ }
+ ]
+ }
+}
+
+METADATA_SCANNER_NO_TYPE = {
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanAggregate": [
+ {
+ "fetcher": "DbFetchAggregateHosts"
+ }
+ ]
+ }
+}
+
+METADATA_SCANNER_NO_FETCHER = {
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanAggregate": [
+ {
+ "type": "host_ref"
+ }
+ ]
+ }
+}
+
+METADATA_SCANNER_INCORRECT_TYPE = {
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanAggregate": [
+ {
+ "type": "t1",
+ "fetcher": "DbFetchAggregateHosts"
+ }
+ ]
+ }
+}
+
+METADATA_SCANNER_INCORRECT_FETCHER = {
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanAggregate": [
+ {
+ "type": "host_ref",
+ "fetcher": "f1"
+ }
+ ]
+ }
+}
+
+METADATA_SCANNER_WITH_CHILD = {
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanAggregatesRoot": [
+ {
+ "type": "aggregate",
+ "fetcher": "DbFetchAggregates",
+ "children_scanner": "ScanAggregate"
+ }
+ ],
+ "ScanAggregate": [
+ {
+ "type": "host_ref",
+ "fetcher": "DbFetchAggregateHosts"
+ }
+ ]
+ }
+}
+
+METADATA_SCANNER_WITH_INCORRECT_CHILD = {
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanAggregatesRoot": [
+ {
+ "type": "aggregate",
+ "fetcher": "DbFetchAggregates",
+ "children_scanner": 1
+ }
+ ]
+ }
+}
+
+METADATA_SCANNER_WITH_MISSING_CHILD = {
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanAggregatesRoot": [
+ {
+ "type": "aggregate",
+ "fetcher": "DbFetchAggregates",
+ "children_scanner": "ScanAggregate"
+ }
+ ]
+ }
+}
+
+METADATA_SCANNER_FETCHER_INVALID_DICT = {
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanEnvironment": [
+ {
+ "type": "regions_folder",
+ "fetcher": {
+ "types_name": "regions",
+ "parent_type": "environment"
+ }
+ },
+ ]
+
+ }
+}
+
+METADATA_SCANNER_WITH_FOLDER = {
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanEnvironment": [
+ {
+ "type": "regions_folder",
+ "fetcher": {
+ "folder": 1,
+ "types_name": "regions",
+ "parent_type": "environment"
+ }
+ },
+ {
+ "type": "projects_folder",
+ "fetcher": {
+ "folder": 1,
+ "types_name": "projects",
+ "parent_type": "environment"
+ }
+ }
+ ]
+ }
+}
+
+METADATA_SCANNER_WITH_INVALID_CONDITION = {
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanHost": [
+ {
+ "type": "pnics_folder",
+ "fetcher": "DbFetchAggregateHosts",
+ "environment_condition": 1
+ }
+ ]
+ }
+}
+
+METADATA_SCANNER_WITH_INVALID_MECHANISM_DRIVER_CONDITION = {
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanHost": [
+ {
+ "type": "pnics_folder",
+ "fetcher": {
+ "folder": 1,
+ "types_name": "pnics",
+ "parent_type": "host",
+ "text": "pNICs"
+ },
+ "environment_condition": {
+ "mechanism_drivers": ""
+ }
+ }
+ ]
+ }
+}
+
+METADATA_SCANNER_WITH_INVALID_MECHANISM_DRIVER = {
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanHost": [
+ {
+ "type": "pnics_folder",
+ "fetcher": {
+ "folder": 1,
+ "types_name": "pnics",
+ "parent_type": "host",
+ "text": "pNICs"
+ },
+ "environment_condition": {
+ "mechanism_drivers": [ 1, 2]
+ }
+ }
+ ]
+ }
+}
+
+METADATA_SCANNER_WITH_CONDITION = {
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanHost": [
+ {
+ "type": "pnics_folder",
+ "fetcher": {
+ "folder": 1,
+ "types_name": "pnics",
+ "parent_type": "host",
+ "text": "pNICs"
+ },
+ "environment_condition": {
+ "mechanism_drivers": [
+ "OVS",
+ "LXB"
+ ]
+ }
+ }
+ ]
+ }
+}
+
+CONSTANTS = {
+ "scan_object_types": {
+ "name": "scan_object_types",
+ "data": [
+ {
+ "value": "regions_folder",
+ "label": "regions_folder"
+ },
+ {
+ "value": "pnics_folder",
+ "label": "pnics_folder"
+ },
+ {
+ "value": "projects_folder",
+ "label": "projects_folder"
+ },
+ {
+ "value": "aggregate",
+ "label": "aggregate"
+ },
+ {
+ "value": "host",
+ "label": "host"
+ },
+ {
+ "value": "region",
+ "label": "region"
+ },
+ {
+ "value": "host_ref",
+ "label": "host_ref"
+ }
+ ]
+ },
+ "mechanism_drivers": {
+ "data": [
+ {
+ "label": "OVS",
+ "value": "OVS"
+ },
+ {
+ "label": "VPP",
+ "value": "VPP"
+ },
+ {
+ "label": "LXB",
+ "value": "LXB"
+ },
+ {
+ "label": "Arista",
+ "value": "Arista"
+ },
+ {
+ "label": "Nexus",
+ "value": "Nexus"
+ }
+ ],
+ "name": "mechanism_drivers"
+ }
+}
diff --git a/app/test/scan/test_data/scan.py b/app/test/scan/test_data/scan.py
new file mode 100644
index 0000000..fa36c3e
--- /dev/null
+++ b/app/test/scan/test_data/scan.py
@@ -0,0 +1,435 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+UNIT_TESTS_ENV = "WebEX-Mirantis@Cisco"
+UNIT_TESTS_INVENTORY = 'unit_tests'
+
+MONGO_CONFIG = 'mongo_config_file.txt'
+
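+# Expected parsed-argument sets for the scanner CLI; they pair with the
+# *_COMMAND_ARGS argv lists defined at the end of this module.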
+DEFAULT_ARGUMENTS = {
+ "MONGO_CONFIG": "",
+ "ENV": UNIT_TESTS_ENV,
+ "TYPE": "environment",
+ "INVENTORY": "inventory",
+ "SCAN_SELF": False,
+ "ID": UNIT_TESTS_ENV,
+ "PARENT_ID": "",
+ "PARENT_TYPE": "",
+ "ID_FIELD": "id",
+ "LOGLEVEL": "INFO",
+ "INVENTORY_ONLY": False,
+ "LINKS_ONLY": False,
+ "CLIQUES_ONLY": False,
+ "CLEAR": False
+}
+
+SHORT_FLAGS_ARGUMENTS = {
+ "MONGO_CONFIG": "mongo_config_file",
+ "ENV": UNIT_TESTS_ENV,
+ "TYPE": "project",
+ "INVENTORY": UNIT_TESTS_INVENTORY,
+ "SCAN_SELF": True,
+ "ID": "admin",
+ "PARENT_ID": "RegionOne",
+ "PARENT_TYPE": "Region",
+ "ID_FIELD": "name",
+ "LOGLEVEL": "ERROR"
+}
+
+ARGUMENTS_FULL = {
+ "MONGO_CONFIG": "mongo_config_file",
+ "ENV": UNIT_TESTS_ENV,
+ "TYPE": "project",
+ "INVENTORY": UNIT_TESTS_INVENTORY,
+ "SCAN_SELF": True,
+ "ID": "admin",
+ "PARENT_ID": "RegionOne",
+ "PARENT_TYPE": "Region",
+ "ID_FIELD": "name",
+ "LOGLEVEL": "ERROR",
+ "INVENTORY_ONLY": False,
+ "LINKS_ONLY": False,
+ "CLIQUES_ONLY": False,
+ "CLEAR": True,
+ "CLEAR_ALL": False
+}
+
+ARGUMENTS_FULL_CLEAR_ALL = {
+ "MONGO_CONFIG": "mongo_config_file",
+ "ENV": UNIT_TESTS_ENV,
+ "TYPE": "project",
+ "INVENTORY": UNIT_TESTS_INVENTORY,
+ "SCAN_SELF": True,
+ "ID": "admin",
+ "PARENT_ID": "RegionOne",
+ "PARENT_TYPE": "Region",
+ "ID_FIELD": "name",
+ "LOGLEVEL": "ERROR",
+ "INVENTORY_ONLY": False,
+ "LINKS_ONLY": False,
+ "CLIQUES_ONLY": False,
+ "CLEAR": False,
+ "CLEAR_ALL": True
+}
+
+ARGUMENTS_FULL_INVENTORY_ONLY = {
+ "MONGO_CONFIG": "mongo_config_file",
+ "ENV": UNIT_TESTS_ENV,
+ "TYPE": "project",
+ "INVENTORY": UNIT_TESTS_INVENTORY,
+ "SCAN_SELF": True,
+ "ID": "admin",
+ "PARENT_ID": "RegionOne",
+ "PARENT_TYPE": "Region",
+ "ID_FIELD": "name",
+ "LOGLEVEL": "ERROR",
+ "INVENTORY_ONLY": True,
+ "LINKS_ONLY": False,
+ "CLIQUES_ONLY": False,
+ "CLEAR": True,
+ "CLEAR_ALL": False
+}
+
+ARGUMENTS_FULL_LINKS_ONLY = {
+ "MONGO_CONFIG": "mongo_config_file",
+ "ENV": UNIT_TESTS_ENV,
+ "TYPE": "project",
+ "INVENTORY": UNIT_TESTS_INVENTORY,
+ "SCAN_SELF": True,
+ "ID": "admin",
+ "PARENT_ID": "RegionOne",
+ "PARENT_TYPE": "Region",
+ "ID_FIELD": "name",
+ "LOGLEVEL": "ERROR",
+ "INVENTORY_ONLY": False,
+ "LINKS_ONLY": True,
+ "CLIQUES_ONLY": False,
+ "CLEAR": True,
+ "CLEAR_ALL": False
+}
+
+ARGUMENTS_FULL_CLIQUES_ONLY = {
+ "MONGO_CONFIG": "mongo_config_file",
+ "ENV": UNIT_TESTS_ENV,
+ "TYPE": "project",
+ "INVENTORY": UNIT_TESTS_INVENTORY,
+ "SCAN_SELF": True,
+ "ID": "admin",
+ "PARENT_ID": "RegionOne",
+ "PARENT_TYPE": "Region",
+ "ID_FIELD": "name",
+ "LOGLEVEL": "ERROR",
+ "INVENTORY_ONLY": False,
+ "LINKS_ONLY": False,
+ "CLIQUES_ONLY": True,
+ "CLEAR": True,
+ "CLEAR_ALL": False
+}
+
+FORM = {
+ "loglevel": "INFO",
+ "inventory_only": False,
+ "links_only": False,
+ "cliques_only": False,
+ "clear": True,
+ "type": "region",
+ "env": UNIT_TESTS_ENV,
+ "id": "RegionOne",
+ "parent_id": UNIT_TESTS_ENV + "-regions",
+ "parent_type": "regions_folder",
+ "id_field": "id",
+ "scan_self": False,
+ "child_type": "region",
+ "child_id": None
+}
+
+
+SCAN_ENV_PLAN_TO_BE_PREPARED = {
+ "loglevel": "INFO",
+ "inventory_only": False,
+ "links_only": False,
+ "cliques_only": False,
+ "clear": True,
+ "object_type": "environment",
+ "env": UNIT_TESTS_ENV,
+ "id": "",
+ "parent_id": "",
+ "type_to_scan": "",
+ "id_field": "id",
+ "scan_self": False,
+ "child_type": "environment",
+ "child_id": None
+}
+
+SCAN_ENV_INVENTORY_ONLY_PLAN_TO_BE_PREPARED = {
+ "loglevel": "INFO",
+ "inventory_only": True,
+ "links_only": False,
+ "cliques_only": False,
+ "clear": True,
+ "object_type": "environment",
+ "env": UNIT_TESTS_ENV,
+ "id": '',
+ "parent_id": "",
+ "type_to_scan": "",
+ "id_field": "id",
+ "scan_self": False,
+ "child_type": "environment",
+ "child_id": None
+}
+
+SCAN_ENV_LINKS_ONLY_PLAN_TO_BE_PREPARED = {
+ "loglevel": "INFO",
+ "inventory_only": False,
+ "links_only": True,
+ "cliques_only": False,
+ "clear": True,
+ "object_type": "environment",
+ "env": UNIT_TESTS_ENV,
+ "id": '',
+ "parent_id": "",
+ "type_to_scan": "",
+ "id_field": "id",
+ "scan_self": False,
+ "child_type": "environment",
+ "child_id": None
+}
+
+SCAN_ENV_CLIQUES_ONLY_PLAN_TO_BE_PREPARED = {
+ "loglevel": "INFO",
+ "inventory_only": False,
+ "links_only": False,
+ "cliques_only": True,
+ "clear": True,
+ "object_type": "environment",
+ "env": UNIT_TESTS_ENV,
+ "id": '',
+ "parent_id": "",
+ "type_to_scan": "",
+ "id_field": "id",
+ "scan_self": False,
+ "child_type": "environment",
+ "child_id": None
+}
+
+PREPARED_ENV_PLAN = {
+ 'obj': {
+ 'id': UNIT_TESTS_ENV
+ },
+ 'child_id': None,
+ 'environment': UNIT_TESTS_ENV,
+ 'inventory_only': False,
+ 'clear': True,
+ 'links_only': False,
+ 'scanner_class': 'ScanEnvironment',
+ 'object_type': 'environment',
+ 'id': UNIT_TESTS_ENV,
+ 'inventory': UNIT_TESTS_INVENTORY,
+ 'loglevel': 'INFO',
+ 'child_type': None,
+ 'type_to_scan': '',
+ 'cliques_only': False,
+ 'id_field': 'id',
+ 'parent_id': '',
+ 'scan_self': False,
+ 'env': UNIT_TESTS_ENV
+}
+
+SCANNER_CLASS = "ScanEnvironment"
+SCANNER_TYPE_FOR_ENV = "ScanEnvironment"
+OBJ_ID_FOR_ENV = ""
+CHILD_TYPE_FOR_ENV = None
+CHILD_ID_FOR_ENV = None
+
+PREPARED_ENV_INVENTORY_ONLY_PLAN = {
+ 'obj': {
+ 'id': UNIT_TESTS_ENV
+ },
+ 'child_id': None,
+ 'clear': True,
+ 'inventory_only': True,
+ 'links_only': False,
+ 'scanner_class': 'ScanEnvironment',
+ 'object_type': 'environment',
+ 'id': UNIT_TESTS_ENV,
+ 'inventory': UNIT_TESTS_INVENTORY,
+ 'loglevel': 'INFO',
+ 'child_type': None,
+ 'type_to_scan': '',
+ 'cliques_only': False,
+ 'id_field': 'id',
+ 'parent_id': '',
+ 'scan_self': False,
+ 'env': UNIT_TESTS_ENV
+}
+
+PREPARED_ENV_LINKS_ONLY_PLAN = {
+ 'obj': {
+ 'id': UNIT_TESTS_ENV
+ },
+ 'child_id': None,
+ 'clear': True,
+ 'inventory_only': False,
+ 'links_only': True,
+ 'cliques_only': False,
+ 'scanner_class': 'ScanEnvironment',
+ 'object_type': 'environment',
+ 'id': UNIT_TESTS_ENV,
+ 'inventory': UNIT_TESTS_INVENTORY,
+ 'loglevel': 'INFO',
+ 'child_type': None,
+ 'type_to_scan': '',
+ 'id_field': 'id',
+ 'parent_id': '',
+ 'scan_self': False,
+ 'env': UNIT_TESTS_ENV
+}
+
+PREPARED_ENV_CLIQUES_ONLY_PLAN = {
+ 'obj': {
+ 'id': UNIT_TESTS_ENV
+ },
+ 'child_id': None,
+ 'clear': True,
+ 'inventory_only': False,
+ 'links_only': False,
+ 'cliques_only': True,
+ 'scanner_class': 'ScanEnvironment',
+ 'object_type': 'environment',
+ 'id': UNIT_TESTS_ENV,
+ 'inventory': UNIT_TESTS_INVENTORY,
+ 'loglevel': 'INFO',
+ 'child_type': None,
+ 'type_to_scan': '',
+ 'id_field': 'id',
+ 'parent_id': '',
+ 'scan_self': False,
+ 'env': UNIT_TESTS_ENV
+}
+
+SCAN_REGION_FOLDER_PLAN_TO_BE_PREPARED = {
+ "loglevel": "INFO",
+ "inventory_only": False,
+ "links_only": False,
+ "cliques_only": False,
+ "clear": True,
+ "object_type": "regions_folder",
+ "env": UNIT_TESTS_ENV,
+ "id": UNIT_TESTS_ENV + "-regions",
+ "parent_id": UNIT_TESTS_ENV,
+ "parent_type": "environment",
+ "type_to_scan": "regions_folder",
+ "id_field": "id",
+ "scan_self": False,
+ "type": "regions_folder"
+}
+
+SCAN_REGION_PLAN_TO_BE_PREPARED = {
+ "loglevel": "INFO",
+ "inventory_only": False,
+ "links_only": False,
+ "cliques_only": False,
+ "clear": True,
+ "object_type": "region",
+ "env": UNIT_TESTS_ENV,
+ "id": "RegionOne",
+ "parent_id": UNIT_TESTS_ENV + "-regions",
+ "parent_type": "regions_folder",
+ "type_to_scan": "region",
+ "id_field": "id",
+ "scan_self": False,
+ "type": "region",
+}
+
+SCANNER_TYPE_FOR_REGION = "ScanRegionsRoot"
+OBJ_ID_FOR_REGION = UNIT_TESTS_ENV + "-regions"
+CHILD_TYPE_FOR_REGION = "region"
+CHILD_ID_FOR_REGION = "RegionOne"
+
+REGIONS_FOLDER = {
+ "id": OBJ_ID_FOR_REGION,
+ "type": "regions_folder",
+ "parent_type": "environment",
+ "object_name": "Regions",
+ "parent_id": UNIT_TESTS_ENV,
+ "name": "Regions",
+ "create_object": True,
+ "text": "Regions"
+}
+
+SCAN_PROJECT_FOLDER_PLAN_TO_BE_PREPARED = {
+ "loglevel": "INFO",
+ "inventory_only": False,
+ "links_only": False,
+ "cliques_only": False,
+ "clear": True,
+ "object_type": "projects_folder",
+ "env": UNIT_TESTS_ENV,
+ "object_id": UNIT_TESTS_ENV + "-projects",
+ "parent_id": UNIT_TESTS_ENV,
+ "type_to_scan": "project",
+ "id_field": "id",
+ "scan_self": False,
+ "child_type": "regions_folder",
+ "child_id": None
+}
+
+SCANNER_CLASS_FOR_REGION_FOLDER = "ScanEnvironment"
+OBJ_ID_FOR_REGION_FOLDER = UNIT_TESTS_ENV
+CHILD_TYPE_FOR_REGION_FOLDER = "regions_folder"
+CHILD_ID_FOR_REGION_FOLDER = UNIT_TESTS_ENV + "-regions"
+
+DEFAULT_COMMAND_ARGS = ["scanner.py"]
+
+SHORT_COMMAND_ARGS = ["scanner.py", "-m", "mongo_config_file",
+ "-e", UNIT_TESTS_ENV, "-t", "project",
+ "-y", UNIT_TESTS_INVENTORY, "-s", "-i", "admin",
+ "-p", "RegionOne", "-a", "Region", "-f", "name",
+ "-l", "ERROR"]
+
+LONG_COMMAND_ARGS = [
+ "scanner.py", "--mongo_config", "mongo_config_file",
+ "--env", UNIT_TESTS_ENV, "--type", "project",
+ "--inventory", UNIT_TESTS_INVENTORY, "--scan_self", "--id", "admin",
+ "--parent_id", "RegionOne", "--parent_type", "Region",
+ "--id_field", "name", "--loglevel", "ERROR",
+ "--clear"]
+
+LONG_COMMAND_ARGS_CLEAR_ALL = [
+ "scanner.py", "--mongo_config", "mongo_config_file",
+ "--env", UNIT_TESTS_ENV, "--type", "project",
+ "--inventory", UNIT_TESTS_INVENTORY, "--scan_self", "--id", "admin",
+ "--parent_id", "RegionOne", "--parent_type", "Region",
+ "--id_field", "name", "--loglevel", "ERROR",
+ "--clear_all"]
+
+LONG_COMMAND_ARGS_INVENTORY_ONLY = [
+ "scanner.py", "--mongo_config", "mongo_config_file",
+ "--env", UNIT_TESTS_ENV, "--type", "project",
+ "--inventory", UNIT_TESTS_INVENTORY, "--scan_self", "--id", "admin",
+ "--parent_id", "RegionOne", "--parent_type", "Region",
+ "--id_field", "name", "--loglevel", "ERROR", "--inventory_only",
+ "--clear"]
+
+LONG_COMMAND_ARGS_LINKS_ONLY = [
+ "scanner.py", "--mongo_config", "mongo_config_file",
+ "--env", UNIT_TESTS_ENV, "--type", "project",
+ "--inventory", UNIT_TESTS_INVENTORY, "--scan_self", "--id", "admin",
+ "--parent_id", "RegionOne", "--parent_type", "Region",
+ "--id_field", "name", "--loglevel", "ERROR", "--links_only",
+ "--clear"]
+
+LONG_COMMAND_ARGS_CLIQUES_ONLY = [
+ "scanner.py", "--mongo_config", "mongo_config_file",
+ "--env", UNIT_TESTS_ENV, "--type", "project",
+ "--inventory", UNIT_TESTS_INVENTORY, "--scan_self", "--id", "admin",
+ "--parent_id", "RegionOne", "--parent_type", "Region",
+ "--id_field", "name", "--loglevel", "ERROR", "--cliques_only",
+ "--clear"]
+
diff --git a/app/test/scan/test_data/scanner.py b/app/test/scan/test_data/scanner.py
new file mode 100644
index 0000000..cebeca2
--- /dev/null
+++ b/app/test/scan/test_data/scanner.py
@@ -0,0 +1,355 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import queue
+from discover.fetchers.folder_fetcher import FolderFetcher
+
+
+SCANNER_TYPE_FOR_ENV = "ScanEnvironment"
+
+METADATA = {
+ "scanners_package": "discover",
+ "scanners": {}
+}
+
+TYPE_TO_FETCH = {
+ "type": "pnic",
+ "fetcher": "CliFetchHostPnicsVpp",
+ "environment_condition": {"mechanism_drivers": "OVS"},
+ "children_scanner": "ScanOteps"
+}
+
+TYPE_TO_FETCH_WITH_WRONG_ENVIRONMENT_CONDITION = {
+ "type": "pnic",
+ "fetcher": "CliFetchHostPnicsVpp",
+ "environment_condition": {"mechanism_drivers": "VPP"},
+ "children_scanner": "ScanOteps"
+}
+
+TYPE_TO_FETCH_WITH_ERROR_VALUE = {
+ "environment_condition": {
+ "distribution": "Mirantis-7.0"
+ }
+}
+
+TYPE_TO_FETCH_WITHOUT_ENV_CON = {
+ "type": "pnic",
+ "fetcher": "CliFetchHostPnicsVpp",
+ "children_scanner": "ScanOteps"
+}
+
+TYPES_TO_FETCH = [
+ {
+ "type": "ports_folder",
+ "fetcher": FolderFetcher("ports", "network")
+ },
+ {
+ "type": "network_services_folder",
+ "fetcher": FolderFetcher("network_services", "network", "Network vServices")
+ }
+]
+
+ID_FIELD = "id"
+
+PROJECT1 = {
+ "object": {
+ "description": "",
+ "enabled": True,
+ "id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "name": "OSDNA-project"
+ },
+ "child_id_field": ID_FIELD,
+ "scanner": "ScanProject"
+}
+
+PROJECT2 = {
+ "object": {
+ "description": "admin tenant",
+ "enabled": True,
+ "id": "8c1751e0ce714736a63fee3c776164da",
+ "name": "admin"
+ },
+ "child_id_field": ID_FIELD,
+ "scanner": "ScanProject"
+}
+
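+# Queue pre-loaded with two project scan requests; QUEUE_SIZE reflects the
+# number of items put on it.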
+SCAN_QUEUE = queue.Queue()
+SCAN_QUEUE.put(PROJECT1)
+SCAN_QUEUE.put(PROJECT2)
+QUEUE_SIZE = 2
+
+LIMIT_TO_CHILD_TYPE = "ports_folder"
+
+CONFIGURATIONS = {
+ "configuration": [
+ {
+ "mock": "True",
+ "host": "10.56.20.239",
+ "name": "mysql",
+ "password": "102QreDdiD5sKcvNf9qbHrmr",
+ "port": 3307.0,
+ "user": "root",
+ "schema": "nova"
+ },
+ {
+ "name": "OpenStack",
+ "host": "10.56.20.239",
+ "admin_token": "38MUh19YWcgQQUlk2VEFQ7Ec",
+ "port": "5000",
+ "user": "admin",
+ "pwd": "admin"
+ },
+ {
+ "host": "10.56.20.239",
+ "key": "/Users/ngrandhi/.ssh/id_rsa",
+ "name": "CLI",
+ "pwd": "",
+ "user": "root"
+ },
+ {
+ "name": "AMQP",
+ "host": "10.56.20.239",
+ "port": "5673",
+ "user": "nova",
+ "password": "NF2nSv3SisooxPkCTr8fbfOa"
+ }
+ ],
+ "distribution": "Mirantis-8.0",
+ "last_scanned:": "5/8/16",
+ "name": "Mirantis-Liberty-Nvn",
+ "mechanism_drivers": [
+ "OVS"
+ ],
+ "operational": "yes",
+ "type": "environment"
+}
+
+TYPES_TO_FETCHES_FOR_PNIC = {
+ "type": "pnic",
+ "fetcher": "CliFetchHostPnicsVpp",
+ "environment_condition": {"mechanism_drivers": "VPP"},
+ "children_scanner": "ScanOteps"
+}
+
+TYPES_TO_FETCHES_FOR_PNIC_WITHOUT_ENV_CON = {
+ "type": "pnic",
+ "fetcher": "CliFetchHostPnicsVpp",
+ "children_scanner": "ScanOteps"
+}
+
+TYPES_TO_FETCHES_FOR_SCAN_AGGREGATE = [{
+ "type": "host_ref",
+ "fetcher": "DbFetchAggregateHosts"
+}]
+
+
+
+
+# id = 'RegionOne-aggregates'
+# obj = self.inv.get_by_id(self.env, id)
+obj = {'id': 'Mirantis-Liberty-Nvn'}
+id_field = 'id'
+child_id = ''
+child_type = ''
+
+
+child_data = [
+ {
+ 'id_path': '/Mirantis-Liberty-Nvn/Mirantis-Liberty-Nvn-regions',
+ 'object_name': 'Regions',
+ 'parent_id': 'Mirantis-Liberty-Nvn',
+ 'environment': 'Mirantis-Liberty-Nvn',
+ 'id': 'Mirantis-Liberty-Nvn-regions',
+ 'show_in_tree': True,
+ 'text': 'Regions',
+ 'type': 'regions_folder',
+ 'name': 'Regions',
+ 'create_object': True,
+ 'name_path': '/Mirantis-Liberty-Nvn/Regions',
+ 'parent_type': 'environment'
+ }
+]
+
+PARENT = {
+ "environment" : "Mirantis-Liberty-Xiaocong",
+ "id" : "node-6.cisco.com-vservices-dhcps",
+ "name" : "node-6.cisco.com-vservices-dhcps",
+ "object_name" : "DHCP servers",
+ "parent_id" : "node-6.cisco.com-vservices",
+ "parent_type" : "vservices_folder",
+ "show_in_tree" : True,
+ "text" : "DHCP servers",
+ "type" : "vservice_dhcps_folder"
+}
+
+PARENT_WITHOUT_ID = {
+ 'id': ''
+}
+
+TYPE_TO_FETCH_FOR_ENVIRONMENT = {
+ "type": "regions_folder",
+ "fetcher": FolderFetcher("regions", "environment"),
+ "children_scanner": "ScanRegionsRoot"
+}
+
+TYPE_TO_FETCH_FOR_ENV_WITHOUT_CHILDREN_FETCHER = {
+ "type": "regions_folder",
+ "fetcher": FolderFetcher("regions", "environment")
+}
+
+DB_RESULTS_WITH_CREATE_OBJECT = [
+ {
+ "name": "Mirantis-Liberty-Xiaocong-regions",
+ "parent_type": "environment",
+ "parent_id": "Mirantis-Liberty-Xiaocong",
+ "text": "Regions",
+ "create_object": True,
+ "type": "regions_folder",
+ "id": "Mirantis-Liberty-Xiaocong-regions"
+ }
+]
+
+DB_RESULTS_WITHOUT_CREATE_OBJECT = [
+ {
+ "name": "Mirantis-Liberty-Xiaocong-regions",
+ "parent_type": "environment",
+ "parent_id": "Mirantis-Liberty-Xiaocong",
+ "text": "Regions",
+ "create_object": False,
+ "type": "regions_folder",
+ "id": "Mirantis-Liberty-Xiaocong-regions"
+ }
+]
+
+DB_RESULTS_WITH_PROJECT = [
+ {
+ "name": "Mirantis-Liberty-Xiaocong-regions",
+ "parent_type": "environment",
+ "parent_id": "Mirantis-Liberty-Xiaocong",
+ "text": "Regions", "create_object": True,
+ "type": "regions_folder",
+ "id": "Mirantis-Liberty-Xiaocong-regions",
+ "in_project-OSDNA-project": "1",
+ }
+]
+
+PROJECT_KEY = "in_project-OSDNA-project"
+
+DB_RESULTS_WITH_MASTER_PARENT_IN_DB = [
+ {
+ "host": "node-6.cisco.com",
+ "id": "qdhcp-413de095-01ed-49dc-aa50-4479f43d390e",
+ "local_service_id": "qdhcp-413de095-01ed-49dc-aa50-4479f43d390e",
+ "master_parent_id": "node-6.cisco.com-vservices",
+ "master_parent_type": "vservices_folder",
+ "name": "dhcp-aiya",
+ "parent_id": "node-6.cisco.com-vservices-dhcps",
+ "parent_text": "DHCP servers",
+ "parent_type": "vservice_dhcps_folder",
+ "service_type": "dhcp"
+ }
+]
+
+DB_RESULTS_WITHOUT_MASTER_PARENT_IN_DB = [
+ {
+ "host": "node-6.cisco.com",
+ "id": "qdhcp-413de095-01ed-49dc-aa50-4479f43d390e",
+ "local_service_id": "qdhcp-413de095-01ed-49dc-aa50-4479f43d390e",
+ "master_parent_id": "node-6.cisco.com-vservices",
+ "master_parent_type": "vservices_folder",
+ "name": "dhcp-aiya",
+ "parent_id": "node-6.cisco.com-vservices-dhcps",
+ "parent_text": "DHCP servers",
+ "parent_type": "vservice_dhcps_folder",
+ "service_type": "dhcp"
+ }
+]
+
+
+DICTIONARY_DB_RESULTS = {
+ "name": "Mirantis-Liberty-Xiaocong-regions",
+ "parent_type": "environment",
+ "parent_id": "Mirantis-Liberty-Xiaocong",
+ "text": "Regions", "create_object": True,
+ "type": "regions_folder",
+ "id": "Mirantis-Liberty-Xiaocong-regions"
+}
+
+MASTER_PARENT = {
+ "create_object" : True,
+ "environment" : "Mirantis-Liberty-Xiaocong",
+ "id" : "node-6.cisco.com-vservices",
+ "id_path" : "/Mirantis-Liberty/Mirantis-Liberty-regions/RegionOne/RegionOne-availability_zones/internal/node-6.cisco.com/node-6.cisco.com-vservices",
+ "name" : "Vservices",
+ "name_path" : "/Mirantis-Liberty/Regions/RegionOne/Availability Zones/internal/node-6.cisco.com/Vservices",
+ "object_name" : "Vservices",
+ "parent_id" : "node-6.cisco.com",
+ "parent_type" : "host",
+ "show_in_tree" : True,
+ "text" : "Vservices",
+ "type" : "vservices_folder"
+}
+
+CONFIGURATIONS_WITHOUT_MECHANISM_DRIVERS = {
+ "configuration": [
+ {
+ "mock": "True",
+ "host": "10.56.20.239",
+ "name": "mysql",
+ "password": "102QreDdiD5sKcvNf9qbHrmr",
+ "port": 3307.0,
+ "user": "root",
+ "schema": "nova"
+ },
+ {
+ "name": "OpenStack",
+ "host": "10.56.20.239",
+ "admin_token": "38MUh19YWcgQQUlk2VEFQ7Ec",
+ "port": "5000",
+ "user": "admin",
+ "pwd": "admin"
+ },
+ {
+ "host": "10.56.20.239",
+ "key": "/Users/ngrandhi/.ssh/id_rsa",
+ "name": "CLI",
+ "pwd": "",
+ "user": "root"
+ },
+ {
+ "name": "AMQP",
+ "host": "10.56.20.239",
+ "port": "5673",
+ "user": "nova",
+ "password": "NF2nSv3SisooxPkCTr8fbfOa"
+ }
+ ],
+ "distribution": "Mirantis-8.0",
+ "last_scanned:": "5/8/16",
+ "name": "Mirantis-Liberty-Nvn",
+ "operational": "yes",
+ "type": "environment"
+}
+
+SCAN_TYPE_RESULTS = [
+ {
+ "description": "",
+ "enabled": True,
+ "id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "name": "OSDNA-project"
+ },
+ {
+ "description": "admin tenant",
+ "enabled": True,
+ "id": "8c1751e0ce714736a63fee3c776164da",
+ "name": "admin"
+ }
+]
+
+LIMIT_TO_CHILD_ID = "75c0eb79ff4a42b0ae4973c8375ddf40"
diff --git a/app/test/scan/test_scan.py b/app/test/scan/test_scan.py
new file mode 100644
index 0000000..a01083e
--- /dev/null
+++ b/app/test/scan/test_scan.py
@@ -0,0 +1,46 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import unittest
+from unittest.mock import MagicMock
+
+from discover.configuration import Configuration
+from monitoring.setup.monitoring_setup_manager import MonitoringSetupManager
+from test.scan.config.test_config \
+ import MONGODB_CONFIG, ENV_CONFIG, COLLECTION_CONFIG
+from test.scan.test_data.configurations import CONFIGURATIONS
+from utils.inventory_mgr import InventoryMgr
+from utils.mongo_access import MongoAccess
+from utils.logging.full_logger import FullLogger
+
+
+class TestScan(unittest.TestCase):
+
+ def configure_environment(self):
+ self.env = ENV_CONFIG
+ self.inventory_collection = COLLECTION_CONFIG
+ # mock the mongo access
+ MongoAccess.mongo_connect = MagicMock()
+ MongoAccess.db = MagicMock()
+ # mock log
+ FullLogger.info = MagicMock()
+
+ self.conf = Configuration()
+ self.conf.use_env = MagicMock()
+ self.conf.environment = CONFIGURATIONS
+ self.conf.configuration = CONFIGURATIONS["configuration"]
+
+ self.inv = InventoryMgr()
+ self.inv.clear = MagicMock()
+ self.inv.set_collections(self.inventory_collection)
+
+ MonitoringSetupManager.server_setup = MagicMock()
+
+ def setUp(self):
+ self.configure_environment()
diff --git a/app/test/scan/test_scan_controller.py b/app/test/scan/test_scan_controller.py
new file mode 100644
index 0000000..f3bcc9a
--- /dev/null
+++ b/app/test/scan/test_scan_controller.py
@@ -0,0 +1,215 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import sys
+from unittest.mock import MagicMock
+
+from discover.scan import ScanController, ScanPlan
+from discover.scanner import Scanner
+from test.scan.test_scan import TestScan
+from test.scan.test_data.scan import *
+from utils.inventory_mgr import InventoryMgr
+
+
+class TestScanController(TestScan):
+
+ def setUp(self):
+ super().setUp()
+ self.scan_controller = ScanController()
+
+ def arg_validate(self, args, expected, key, err=None):
+ if key not in expected:
+ return
+ err = err if err else 'The value of {} is wrong'.format(key)
+ self.assertEqual(args.get(key, None), expected[key], err)
+
+ def check_args_values(self, args, expected):
+ self.arg_validate(args, expected, 'env',
+ 'The value of environment is wrong')
+ keys = ['mongo_config', 'type', 'inventory',
+ 'scan_self', 'id', 'parent_id', 'parent_type', 'id_field',
+ 'loglevel', 'inventory_only', 'links_only', 'cliques_only',
+ 'clear']
+ for key in keys:
+ self.arg_validate(args, expected, key)
+
+ def test_get_args_with_default_arguments(self):
+ sys.argv = DEFAULT_COMMAND_ARGS
+ args = self.scan_controller.get_args()
+ # check the default value of each argument
+ self.check_args_values(args, DEFAULT_ARGUMENTS)
+
+ def test_get_args_with_short_command_args(self):
+ sys.argv = SHORT_COMMAND_ARGS
+ args = self.scan_controller.get_args()
+ # check the value parsed by short arguments
+ self.check_args_values(args, SHORT_FLAGS_ARGUMENTS)
+
+ def test_get_args_with_full_command_args(self):
+ sys.argv = LONG_COMMAND_ARGS
+ args = self.scan_controller.get_args()
+ # check the value parsed by long arguments
+ self.check_args_values(args, ARGUMENTS_FULL)
+
+ def test_get_args_with_full_command_args_clear_all(self):
+ sys.argv = LONG_COMMAND_ARGS_CLEAR_ALL
+ args = self.scan_controller.get_args()
+ # check the value parsed by long arguments
+ self.check_args_values(args, ARGUMENTS_FULL_CLEAR_ALL)
+
+ def test_get_args_with_full_command_args_inventory_only(self):
+ sys.argv = LONG_COMMAND_ARGS_INVENTORY_ONLY
+ args = self.scan_controller.get_args()
+ # check the value parsed by long arguments
+ self.check_args_values(args, ARGUMENTS_FULL_INVENTORY_ONLY)
+
+ def test_get_args_with_full_command_args_links_only(self):
+ sys.argv = LONG_COMMAND_ARGS_LINKS_ONLY
+ args = self.scan_controller.get_args()
+ # check the value parsed by long arguments
+ self.check_args_values(args, ARGUMENTS_FULL_LINKS_ONLY)
+
+ def test_get_args_with_full_command_args_cliques_only(self):
+ sys.argv = LONG_COMMAND_ARGS_CLIQUES_ONLY
+ args = self.scan_controller.get_args()
+ # check the value parsed by long arguments
+ self.check_args_values(args, ARGUMENTS_FULL_CLIQUES_ONLY)
+
+ def side_effect(self, key, default):
+ return FORM.get(key, default)
+
+ def check_plan_values(self, plan, scanner_type, obj_id,
+ child_type, child_id):
+ self.assertEqual(scanner_type, plan.scanner_type,
+ 'The scanner class is wrong')
+ self.assertEqual(child_type, plan.child_type,
+ 'The child type is wrong')
+ self.assertEqual(child_id, plan.child_id,
+ 'The child id is wrong')
+ self.assertEqual(obj_id, plan.object_id, 'The object is wrong')
+
+ def test_prepare_scan_plan(self):
+ scan_plan = ScanPlan(SCAN_ENV_PLAN_TO_BE_PREPARED)
+ plan = self.scan_controller.prepare_scan_plan(scan_plan)
+ self.check_plan_values(plan, SCANNER_TYPE_FOR_ENV,
+ OBJ_ID_FOR_ENV, CHILD_TYPE_FOR_ENV,
+ CHILD_ID_FOR_ENV)
+
+ def test_prepare_scan_region_plan(self):
+ original_get_by_id = self.inv.get_by_id
+ self.inv.get_by_id = MagicMock(return_value=REGIONS_FOLDER)
+
+ self.scan_controller.inv = self.inv
+ scan_plan = ScanPlan(SCAN_REGION_PLAN_TO_BE_PREPARED)
+ plan = self.scan_controller.prepare_scan_plan(scan_plan)
+
+ self.check_plan_values(plan, SCANNER_TYPE_FOR_REGION,
+ OBJ_ID_FOR_REGION, CHILD_TYPE_FOR_REGION,
+ CHILD_ID_FOR_REGION)
+ self.inv.get_by_id = original_get_by_id
+
+ def test_prepare_scan_region_folder_plan(self):
+ scan_plan = ScanPlan(SCAN_REGION_FOLDER_PLAN_TO_BE_PREPARED)
+ plan = self.scan_controller.prepare_scan_plan(scan_plan)
+ self.check_plan_values(plan, SCANNER_CLASS_FOR_REGION_FOLDER,
+ OBJ_ID_FOR_REGION_FOLDER,
+ CHILD_TYPE_FOR_REGION_FOLDER,
+ CHILD_ID_FOR_REGION_FOLDER)
+
+ def check_scan_method_calls(self, mock, count):
+ if count:
+ self.assertTrue(mock.called)
+ else:
+ mock.assert_not_called()
+
+ def check_scan_counts(self, run_scan_count, scan_links_count,
+ scan_cliques_count, deploy_monitoring_setup_count):
+ self.check_scan_method_calls(Scanner.scan, run_scan_count)
+ self.check_scan_method_calls(Scanner.scan_links, scan_links_count)
+ self.check_scan_method_calls(Scanner.scan_cliques, scan_cliques_count)
+ self.check_scan_method_calls(Scanner.deploy_monitoring_setup,
+ deploy_monitoring_setup_count)
+
+ def prepare_scan_mocks(self):
+ self.load_metadata = Scanner.load_metadata
+ self.scan = Scanner.scan
+ self.scan_links = Scanner.scan_links
+ self.scan_cliques = Scanner.scan_cliques
+ self.deploy_monitoring_setup = Scanner.deploy_monitoring_setup
+
+ Scanner.load_metadata = MagicMock()
+ Scanner.scan = MagicMock()
+ Scanner.scan_links = MagicMock()
+ Scanner.scan_cliques = MagicMock()
+ Scanner.deploy_monitoring_setup = MagicMock()
+
+ def reset_methods(self):
+ Scanner.load_metadata = self.load_metadata
+ Scanner.scan = self.scan
+ Scanner.scan_links = self.scan_links
+ Scanner.scan_cliques = self.scan_cliques
+ Scanner.deploy_monitoring_setup = self.deploy_monitoring_setup
+
+ def test_scan(self):
+ self.scan_controller.get_args = MagicMock()
+ InventoryMgr.is_feature_supported = MagicMock(return_value=False)
+ plan = self.scan_controller.prepare_scan_plan(ScanPlan(SCAN_ENV_PLAN_TO_BE_PREPARED))
+ self.scan_controller.get_scan_plan = MagicMock(return_value=plan)
+ self.prepare_scan_mocks()
+
+ self.scan_controller.run()
+ self.check_scan_counts(1, 1, 1, 0)
+ self.reset_methods()
+
+ def test_scan_with_monitoring_setup(self):
+ self.scan_controller.get_args = MagicMock()
+ InventoryMgr.is_feature_supported = MagicMock(return_value=True)
+ plan = self.scan_controller.prepare_scan_plan(ScanPlan(SCAN_ENV_PLAN_TO_BE_PREPARED))
+ self.scan_controller.get_scan_plan = MagicMock(return_value=plan)
+ self.prepare_scan_mocks()
+
+ self.scan_controller.run()
+ self.check_scan_counts(1, 1, 1, 1)
+ self.reset_methods()
+
+ def test_scan_with_inventory_only(self):
+ self.scan_controller.get_args = MagicMock()
+ scan_plan = ScanPlan(SCAN_ENV_INVENTORY_ONLY_PLAN_TO_BE_PREPARED)
+ plan = self.scan_controller.prepare_scan_plan(scan_plan)
+ self.scan_controller.get_scan_plan = MagicMock(return_value=plan)
+ self.prepare_scan_mocks()
+
+ self.scan_controller.run()
+ self.check_scan_counts(1, 0, 0, 0)
+ self.reset_methods()
+
+ def test_scan_with_links_only(self):
+ self.scan_controller.get_args = MagicMock()
+ scan_plan = ScanPlan(SCAN_ENV_LINKS_ONLY_PLAN_TO_BE_PREPARED)
+ plan = self.scan_controller.prepare_scan_plan(scan_plan)
+ self.scan_controller.get_scan_plan = MagicMock(return_value=plan)
+ self.prepare_scan_mocks()
+
+ self.scan_controller.run()
+ self.check_scan_counts(0, 1, 0, 0)
+ self.reset_methods()
+
+ def test_scan_with_cliques_only(self):
+ self.scan_controller.get_args = MagicMock()
+ scan_plan = ScanPlan(SCAN_ENV_CLIQUES_ONLY_PLAN_TO_BE_PREPARED)
+ plan = self.scan_controller.prepare_scan_plan(scan_plan)
+ self.scan_controller.get_scan_plan = MagicMock(return_value=plan)
+ self.prepare_scan_mocks()
+
+ self.scan_controller.run()
+ self.check_scan_counts(0, 0, 1, 0)
+ self.reset_methods()
diff --git a/app/test/scan/test_scan_metadata_parser.py b/app/test/scan/test_scan_metadata_parser.py
new file mode 100644
index 0000000..91c11ef
--- /dev/null
+++ b/app/test/scan/test_scan_metadata_parser.py
@@ -0,0 +1,152 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_access import DbAccess
+from discover.scan_metadata_parser import ScanMetadataParser
+from test.scan.test_scan import TestScan
+from test.scan.test_data.metadata import *
+from unittest import mock
+from utils.mongo_access import MongoAccess
+
+
+SCANNERS_FILE = 'scanners.json'
+
+JSON_REQUIRED_FIELDS_ERROR = 'Metadata json should contain all the ' + \
+ 'following fields: scanners_package, scanners'
+
+JSON_NO_SCANNERS = 'no scanners found in scanners list'
+JSON_ERRORS_FOUND = 'Errors encountered during metadata file parsing:\n'
+
+
+class TestScanMetadataParser(TestScan):
+ def setUp(self):
+ super().setUp()
+ DbAccess.conn = mock.MagicMock()
+ self.prepare_constants()
+ self.parser = ScanMetadataParser(self.inv)
+
+ self.parser.check_metadata_file_ok = mock.MagicMock()
+
+ def prepare_metadata(self, content):
+ self.parser._load_json_file = mock.MagicMock(return_value=content)
+
+ def prepare_constants(self):
+ MongoAccess.db = mock.MagicMock()
+ MongoAccess.db["constants"].find_one = mock.MagicMock(side_effect=
+ lambda input:
+ CONSTANTS[input["name"]]
+ if CONSTANTS.get(input["name"])
+ else []
+ )
+
+ def handle_error_scenario(self, input_content, expected_error,
+ add_errors_encountered_pretext=True):
+ self.prepare_metadata(input_content)
+ found_exception = False
+ expected_message = expected_error
+ metadata = None
+ try:
+ metadata = self.parser.parse_metadata_file(SCANNERS_FILE)
+ except ValueError as e:
+ found_exception = True
+ expected_message = expected_error \
+ if not add_errors_encountered_pretext \
+ else JSON_ERRORS_FOUND + expected_error
+ self.assertEqual(str(e), expected_message)
+ self.assertTrue(found_exception,
+ 'failed to throw exception, expected_message: {}'
+ .format(expected_message))
+ self.assertIsNone(metadata)
+
+ def handle_json_missing_field(self, json_content):
+ self.handle_error_scenario(json_content, JSON_REQUIRED_FIELDS_ERROR,
+ add_errors_encountered_pretext=False)
+
+ def test_missing_field(self):
+ for content in [METADATA_EMPTY, METADATA_NO_PACKAGE,
+ METADATA_NO_SCANNERS]:
+ self.handle_json_missing_field(content)
+
+ def test_json_no_scanners(self):
+ self.handle_error_scenario(METADATA_ZERO_SCANNERS, JSON_NO_SCANNERS)
+
+ def test_json_scanner_errors(self):
+ errors_scenarios = [
+ {
+ 'input': METADATA_ZERO_SCANNERS,
+ 'msg': JSON_NO_SCANNERS
+ },
+ {
+ 'input': METADATA_SCANNER_UNKNOWN_ATTRIBUTE,
+ 'msg': 'unknown attribute xyz in scanner ScanAggregate, type #1'
+ },
+ {
+ 'input': METADATA_SCANNER_NO_TYPE,
+ 'msg': 'scanner ScanAggregate, type #1: ' +
+ 'missing attribute "type"'
+ },
+ {
+ 'input': METADATA_SCANNER_NO_FETCHER,
+ 'msg': 'scanner ScanAggregate, type #1: ' +
+ 'missing attribute "fetcher"'
+ },
+ {
+ 'input': METADATA_SCANNER_INCORRECT_TYPE,
+ 'msg': 'scanner ScanAggregate: value not in types: t1'
+ },
+ {
+ 'input': METADATA_SCANNER_INCORRECT_FETCHER,
+ 'msg': 'failed to find fetcher class f1 '
+ 'in scanner ScanAggregate type #1'
+ },
+ {
+ 'input': METADATA_SCANNER_WITH_INCORRECT_CHILD,
+ 'msg': 'scanner ScanAggregatesRoot type #1: '
+ 'children_scanner must be a string'
+ },
+ {
+ 'input': METADATA_SCANNER_WITH_MISSING_CHILD,
+ 'msg': 'scanner ScanAggregatesRoot type #1: '
+ 'children_scanner ScanAggregate not found '
+ },
+ {
+ 'input': METADATA_SCANNER_FETCHER_INVALID_DICT,
+ 'msg': 'scanner ScanEnvironment type #1: '
+ 'only folder dict accepted in fetcher'
+ },
+ {
+ 'input': METADATA_SCANNER_WITH_INVALID_CONDITION,
+ 'msg': 'scanner ScanHost type #1: condition must be dict'
+ }
+ ]
+ for scenario in errors_scenarios:
+ self.handle_error_scenario(scenario['input'], scenario['msg'])
+
+ def check_json_is_ok(self, json_content):
+ self.prepare_metadata(json_content)
+ found_exception = False
+ metadata = None
+ msg = None
+ try:
+ metadata = self.parser.parse_metadata_file(SCANNERS_FILE)
+ except ValueError as e:
+ found_exception = True
+ msg = str(e)
+ self.assertFalse(found_exception, 'Exception: {}'.format(msg))
+ self.assertIsNotNone(metadata)
+
+ def test_json_valid_content(self):
+ valid_content = [
+ METADATA_SIMPLE_SCANNER,
+ METADATA_SCANNER_WITH_CHILD,
+ METADATA_SCANNER_WITH_FOLDER,
+ METADATA_SCANNER_WITH_CONDITION
+ ]
+ for content in valid_content:
+ self.check_json_is_ok(content)
diff --git a/app/test/scan/test_scanner.py b/app/test/scan/test_scanner.py
new file mode 100644
index 0000000..4a7536e
--- /dev/null
+++ b/app/test/scan/test_scanner.py
@@ -0,0 +1,355 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.scanner import Scanner
+from test.scan.test_scan import TestScan
+from unittest.mock import MagicMock, patch
+from discover.scan_metadata_parser import ScanMetadataParser
+from test.scan.test_data.scanner import *
+from monitoring.setup.monitoring_setup_manager import MonitoringSetupManager
+
+
+class TestScanner(TestScan):
+
+ def setUp(self):
+ super().setUp()
+ ScanMetadataParser.parse_metadata_file = MagicMock(return_value=METADATA)
+ self.scanner = Scanner()
+ self.scanner.set_env(self.env)
+ MonitoringSetupManager.create_setup = MagicMock()
+ self.scanner.inv.monitoring_setup_manager = \
+ MonitoringSetupManager(self.env)
+
+ def test_check_type_env_without_environment_condition(self):
+ result = self.scanner.check_type_env(TYPE_TO_FETCH_WITHOUT_ENV_CON)
+
+ self.assertEqual(result, True,
+ "Can't get true when the type_to_fetch " +
+ "doesn't contain environment condition")
+
+ def test_check_type_with_error_value(self):
+ # store original method
+ original_get_env_config = self.scanner.config.get_env_config
+
+ # mock get_env_config method
+ self.scanner.config.get_env_config =\
+ MagicMock(return_value=CONFIGURATIONS)
+
+ result = self.scanner.check_type_env(TYPE_TO_FETCH_WITH_ERROR_VALUE)
+
+ # reset get_env_config method
+ self.scanner.config.get_env_config = original_get_env_config
+
+ self.assertEqual(result, False,
+ "Can't get false when the type_to_fetch " +
+ "contain error value")
+
+ def test_check_type_env_without_mechanism_drivers_in_env_config(self):
+ # store original method
+ original_get_env_config = self.scanner.config.get_env_config
+
+ # mock get_env_config_method
+ self.scanner.config.get_env_config =\
+ MagicMock(return_value=CONFIGURATIONS_WITHOUT_MECHANISM_DRIVERS)
+
+ result = self.scanner.check_type_env(TYPE_TO_FETCH)
+ # reset get_env_config method
+ self.scanner.config.get_env_config = original_get_env_config
+
+ self.assertEqual(result, False,
+ "Can't get false when configuration " +
+ "doesn't contain mechanism drivers")
+
+ def test_check_type_env_with_wrong_mech_drivers_in_env_condition(self):
+ # store original method
+ original_get_env_config = self.scanner.config.get_env_config
+
+ # mock get_env_config_method
+ self.scanner.config.get_env_config =\
+ MagicMock(return_value=CONFIGURATIONS)
+
+ result = self.scanner.\
+ check_type_env(TYPE_TO_FETCH_WITH_WRONG_ENVIRONMENT_CONDITION)
+ # reset get_env_config method
+ self.scanner.config.get_env_config = original_get_env_config
+
+ self.assertEqual(result, False, "Can't get false when the mechanism " +
+ "drivers in type_to_fetch " +
+ "don't exist in configurations")
+
+ def test_check_type_env(self):
+ # store original method
+ original_get_env_config = self.scanner.config.get_env_config
+
+ # mock method
+ self.scanner.config.get_env_config =\
+ MagicMock(return_value=CONFIGURATIONS)
+
+ result = self.scanner.check_type_env(TYPE_TO_FETCH)
+
+ # reset method
+ self.scanner.config.get_env_config = original_get_env_config
+
+ self.assertEqual(result, True,
+ "Can't get True when the type_to_fetch is correct")
+
+ def test_scan_error_type(self):
+ # store original method
+ original_check_type_env = self.scanner.check_type_env
+
+ # mock method
+ self.scanner.check_type_env = MagicMock(return_value=False)
+
+ result = self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT,
+ ID_FIELD)
+
+ # reset method
+ self.scanner.check_type_env = original_check_type_env
+
+ self.assertEqual(result, [],
+ "Can't get [], when the type_to_fetch is wrong")
+
+ def test_scan_type_without_parent_id(self):
+ # scan_type should raise when the parent has no id attribute
+ with self.assertRaises(Exception):
+ self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT,
+ PARENT_WITHOUT_ID, ID_FIELD)
+
+ @patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
+ def test_scan_type_with_get_exception(self, fetcher_get):
+ fetcher_get.side_effect = Exception("get exception")
+
+ # scan_type should propagate the exception raised by fetcher.get
+ with self.assertRaises(Exception):
+ self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT,
+ PARENT, ID_FIELD)
+
+ @patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
+ def test_scan_type_without_master_parent(self, fetcher_get):
+ fetcher_get.return_value = DB_RESULTS_WITHOUT_MASTER_PARENT_IN_DB
+
+ # store original get_by_id
+ original_get_by_id = self.scanner.inv.get_by_id
+ original_set = self.scanner.inv.set
+
+ # mock methods
+ self.scanner.inv.get_by_id = MagicMock(return_value=[])
+
+ result = self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT,
+ ID_FIELD)
+
+ # reset methods
+ self.scanner.inv.get_by_id = original_get_by_id
+ self.scanner.inv.set = original_set
+ self.assertEqual(result, [], "Can't get [], when the master parent " +
+ "doesn't exist in database")
+
+ @patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
+ def test_scan_type_with_master_parent(self, fetcher_get):
+ fetcher_get.return_value = DB_RESULTS_WITH_MASTER_PARENT_IN_DB
+
+ # store original methods
+ original_get_by_id = self.scanner.inv.get_by_id
+ original_set = self.scanner.inv.set
+
+ # mock methods
+ self.scanner.inv.get_by_id = MagicMock(return_value=MASTER_PARENT)
+ self.scanner.inv.set = MagicMock()
+
+ self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT, ID_FIELD)
+ self.assertEqual(self.scanner.inv.set.call_count, 2,
+ "Can't create additional folder")
+ self.assertNotIn("master_parent_type",
+ DB_RESULTS_WITH_MASTER_PARENT_IN_DB[0],
+ "Can't delete the master_parent_type")
+ self.assertNotIn("master_parent_id",
+ DB_RESULTS_WITH_MASTER_PARENT_IN_DB[0],
+ "Can't delete the master_parent_id")
+
+ # reset methods
+ self.scanner.inv.get_by_id = original_get_by_id
+ self.scanner.inv.set = original_set
+
+ @patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
+ def test_scan_type_with_in_project(self, fetcher_get):
+ fetcher_get.return_value = DB_RESULTS_WITH_PROJECT
+
+ # store original method
+ original_set = self.scanner.inv.set
+
+ # mock method
+ self.scanner.inv.set = MagicMock()
+
+ self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT, ID_FIELD)
+ self.assertIn("projects", DB_RESULTS_WITH_PROJECT[0],
+ "Can't get the projects from DB result")
+ self.assertNotIn(PROJECT_KEY, DB_RESULTS_WITH_PROJECT[0],
+ "Can't delete the project key in the object")
+
+ self.scanner.inv.set = original_set
+
+ @patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
+ def test_scan_type_without_create_object(self, fetcher_get):
+ fetcher_get.return_value = DB_RESULTS_WITHOUT_CREATE_OBJECT
+
+ original_set = self.scanner.inv.set
+
+ self.scanner.inv.set = MagicMock()
+ self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT, ID_FIELD)
+
+ self.assertEqual(self.scanner.inv.set.call_count, 0,
+ "Set the object when the create object is false")
+
+ self.scanner.inv.set = original_set
+
+ @patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
+ def test_scan_type_with_create_object(self, fetcher_get):
+ fetcher_get.return_value = DB_RESULTS_WITH_CREATE_OBJECT
+
+ original_set = self.scanner.inv.set
+
+ self.scanner.inv.set = MagicMock()
+ self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT, ID_FIELD)
+
+ self.assertEqual(self.scanner.inv.set.call_count, 1,
+ "Didn't set the object although create_object is true")
+
+ self.scanner.inv.set = original_set
+
+ @patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
+ def test_scan_type_with_children_scanner(self, fetcher_get):
+ fetcher_get.return_value = DB_RESULTS_WITH_CREATE_OBJECT
+
+ original_set = self.scanner.inv.set
+ original_queue_for_scan = self.scanner.queue_for_scan
+
+ self.scanner.inv.set = MagicMock()
+ self.scanner.queue_for_scan = MagicMock()
+
+ self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT, ID_FIELD)
+
+ self.assertEqual(self.scanner.queue_for_scan.call_count, 1,
+ "Can't put children scanner in the queue")
+
+ self.scanner.inv.set = original_set
+ self.scanner.queue_for_scan = original_queue_for_scan
+
+ @patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
+ def test_scan_type_without_children_scanner(self, fetcher_get):
+ fetcher_get.return_value = DB_RESULTS_WITH_CREATE_OBJECT
+
+ original_set = self.scanner.inv.set
+ original_queue_for_scan = self.scanner.queue_for_scan
+
+ self.scanner.inv.set = MagicMock()
+ self.scanner.queue_for_scan = MagicMock()
+
+ self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENV_WITHOUT_CHILDREN_FETCHER,
+ PARENT, ID_FIELD)
+
+ self.assertEqual(self.scanner.queue_for_scan.call_count, 0,
+ "Queued a children scanner when none is defined")
+
+ self.scanner.inv.set = original_set
+ self.scanner.queue_for_scan = original_queue_for_scan
+
+ @patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
+ def test_scan_type(self, fetcher_get):
+ fetcher_get.return_value = DB_RESULTS_WITH_CREATE_OBJECT
+
+ original_set = self.scanner.inv.set
+ original_queue_for_scan = self.scanner.queue_for_scan
+
+ self.scanner.inv.set = MagicMock()
+ self.scanner.queue_for_scan = MagicMock()
+
+ result = self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT,
+ ID_FIELD)
+
+ self.assertNotEqual(result, [], "Can't get children from scan_type")
+
+ self.scanner.inv.set = original_set
+ self.scanner.queue_for_scan = original_queue_for_scan
+
+ def test_scan_with_limit_to_child_type(self):
+ original_scan_type = self.scanner.scan_type
+ original_get_scanner = self.scanner.get_scanner
+
+ self.scanner.scan_type = MagicMock(return_value=[])
+ self.scanner.get_scanner = MagicMock(return_value=TYPES_TO_FETCH)
+
+ limit_to_child_type = TYPES_TO_FETCH[0]['type']
+
+ self.scanner.scan(SCANNER_TYPE_FOR_ENV, PARENT, limit_to_child_type=limit_to_child_type)
+
+ # only scan the limit child type
+ self.scanner.scan_type.assert_called_with(TYPES_TO_FETCH[0], PARENT,
+ ID_FIELD)
+
+ self.scanner.scan_type = original_scan_type
+ self.scanner.get_scanner = original_get_scanner
+
+ def test_scan_with_limit_to_child_id(self):
+ original_scan_type = self.scanner.scan_type
+ original_get_scanner = self.scanner.get_scanner
+
+ self.scanner.get_scanner = MagicMock(return_value=TYPES_TO_FETCH)
+ limit_to_child_id = SCAN_TYPE_RESULTS[0][ID_FIELD]
+
+ self.scanner.scan_type = MagicMock(return_value=SCAN_TYPE_RESULTS)
+
+ children = self.scanner.scan(SCANNER_TYPE_FOR_ENV, PARENT, id_field=ID_FIELD,
+ limit_to_child_id=limit_to_child_id)
+
+ # only get the limit child
+ self.assertEqual(children, SCAN_TYPE_RESULTS[0])
+
+ self.scanner.scan_type = original_scan_type
+ self.scanner.get_scanner = original_get_scanner
+
+ def test_scan(self):
+ original_scan_type = self.scanner.scan_type
+ original_get_scanner = self.scanner.get_scanner
+
+ self.scanner.scan_type = MagicMock(return_value=[])
+ self.scanner.get_scanner = MagicMock(return_value=TYPES_TO_FETCH)
+ result = self.scanner.scan(SCANNER_TYPE_FOR_ENV, PARENT)
+
+ self.assertEqual(PARENT, result,
+ "Can't get the original parent after the scan")
+
+ self.scanner.get_scanner = original_get_scanner
+ self.scanner.scan_type = original_scan_type
+
+ def test_run_scan(self):
+ original_scan = self.scanner.scan
+ original_scan_from_queue = self.scanner.scan_from_queue
+
+ self.scanner.scan = MagicMock()
+ self.scanner.scan_from_queue = MagicMock()
+
+ self.scanner.run_scan(SCANNER_TYPE_FOR_ENV, PARENT, ID_FIELD, LIMIT_TO_CHILD_ID,
+ LIMIT_TO_CHILD_TYPE)
+
+ self.scanner.scan.assert_called_with(SCANNER_TYPE_FOR_ENV, PARENT, ID_FIELD,
+ LIMIT_TO_CHILD_ID,
+ LIMIT_TO_CHILD_TYPE)
+ self.scanner.scan_from_queue.assert_any_call()
+
+ self.scanner.scan = original_scan
+ self.scanner.scan_from_queue = original_scan_from_queue
+
+ @patch("discover.scanner.Scanner.scan")
+ def test_scan_from_queue(self, scan):
+ scan.return_value = []
+ Scanner.scan_queue = SCAN_QUEUE
+
+ self.scanner.scan_from_queue()
+
+ self.assertEqual(self.scanner.scan.call_count, QUEUE_SIZE,
+ "Can't scan all the objects in the queue")
diff --git a/app/test/test_suite.py b/app/test/test_suite.py
new file mode 100644
index 0000000..00e7492
--- /dev/null
+++ b/app/test/test_suite.py
@@ -0,0 +1,25 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import argparse
+import unittest
+
+
+def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-d", "--dir", dest="start_dir", nargs="?",
+ type=str, default=".",
+ help="Name of root directory for test cases discovery")
+
+ return parser.parse_args()
+
+if __name__ == "__main__":
+ args = get_args()
+ suite = unittest.TestLoader().discover(start_dir=args.start_dir)
+ unittest.TextTestRunner(verbosity=2).run(suite) \ No newline at end of file
diff --git a/app/utils/__init__.py b/app/utils/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/utils/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/utils/binary_converter.py b/app/utils/binary_converter.py
new file mode 100644
index 0000000..70d4e40
--- /dev/null
+++ b/app/utils/binary_converter.py
@@ -0,0 +1,27 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from utils.logging.console_logger import ConsoleLogger
+
+
+class BinaryConverter:
+
+ def __init__(self):
+ super().__init__()
+ self.log = ConsoleLogger()
+
+ def binary2str(self, txt):
+ if not isinstance(txt, bytes):
+ return str(txt)
+ try:
+ s = txt.decode("utf-8")
+ except (UnicodeDecodeError, TypeError):
+ s = str(txt)
+ return s
+
diff --git a/app/utils/config_file.py b/app/utils/config_file.py
new file mode 100644
index 0000000..1982acc
--- /dev/null
+++ b/app/utils/config_file.py
@@ -0,0 +1,48 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import os
+
+
+class ConfigFile:
+
+ def __init__(self, file_path):
+ super().__init__()
+ if not os.path.isfile(file_path):
+ raise ValueError("config file doesn't exist in "
+ "the system: {0}"
+ .format(file_path))
+ self.config_file = file_path
+
+ def read_config(self):
+ params = {}
+ with open(self.config_file) as f:
+ for line in f:
+ line = line.strip()
+ if line.startswith("#") or " " not in line:
+ continue
+ index = line.index(" ")
+ key = line[:index].strip()
+ value = line[index + 1:].strip()
+ if value:
+ params[key] = value
+ return params
+
+ @staticmethod
+ def get(file_name):
+ # config files are looked up under the 'config' directory
+ # relative to the first PYTHONPATH entry (app/config by default)
+ python_path = os.environ['PYTHONPATH']
+ if os.pathsep in python_path:
+ python_path = python_path.split(os.pathsep)[0]
+ return python_path + '/config/' + file_name
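+
+# Illustrative note (not part of the original module): read_config expects
+# plain "key value" lines; '#' comment lines and lines without a space are
+# skipped. Assuming a hypothetical file mongo.conf such as:
+#     # MongoDB connection parameters
+#     server 10.56.20.239
+#     port 27017
+# ConfigFile(ConfigFile.get('mongo.conf')).read_config() would return
+# {'server': '10.56.20.239', 'port': '27017'}.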
diff --git a/app/utils/constants.py b/app/utils/constants.py
new file mode 100644
index 0000000..7aa0343
--- /dev/null
+++ b/app/utils/constants.py
@@ -0,0 +1,37 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from enum import Enum
+
+
+class StringEnum(Enum):
+ def __str__(self):
+ return str(self.value)
+
+ def __repr__(self):
+ return repr(self.value)
+
+
+class ScanStatus(StringEnum):
+ PENDING = "pending"
+ RUNNING = "running"
+ COMPLETED = "completed"
+ FAILED = "failed"
+
+
+class OperationalStatus(StringEnum):
+ STOPPED = "stopped"
+ RUNNING = "running"
+ ERROR = "error"
+
+
+class EnvironmentFeatures(StringEnum):
+ SCANNING = "scanning"
+ MONITORING = "monitoring"
+ LISTENING = "listening"
diff --git a/app/utils/deep_merge.py b/app/utils/deep_merge.py
new file mode 100644
index 0000000..acb54ff
--- /dev/null
+++ b/app/utils/deep_merge.py
@@ -0,0 +1,77 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+"""
+Do a deep merge of dictionaries,
+recursively merging them with boltons.iterutils.remap
+
+Taken from:
+https://gist.github.com/mahmoud/db02d16ac89fa401b968
+
+
+This is an extension of the technique first detailed here:
+http://sedimental.org/remap.html#add_common_keys
+In short, it calls remap on each container, back to front,
+using the accumulating previous values as the default for
+the current iteration.
+"""
+
+from boltons.iterutils import remap, get_path, default_enter, default_visit
+
+
+def remerge(target_list, sourced=False):
+ """
+ Takes a list of containers (e.g., dicts) and merges them using
+ boltons.iterutils.remap. Containers later in the list take
+ precedence (last-wins).
+ By default, returns a new, merged top-level container. With the
+ *sourced* option, `remerge` expects a list of (*name*, *container*)
+ pairs and returns a (merged, source_map) tuple, where source_map maps
+ each path to the name of the container it came from.
+ """
+
+ if not sourced:
+ target_list = [(id(t), t) for t in target_list]
+
+ ret = None
+ source_map = {}
+
+ def remerge_enter(path, key, value):
+ new_parent, new_items = default_enter(path, key, value)
+ if ret and not path and key is None:
+ new_parent = ret
+ try:
+ cur_val = get_path(ret, path + (key,))
+ except KeyError:
+ pass
+ else:
+ # TODO: type check?
+ new_parent = cur_val
+
+ if isinstance(value, list):
+ # lists are purely additive.
+ # See https://github.com/mahmoud/boltons/issues/81
+ new_parent.extend(value)
+ new_items = []
+
+ return new_parent, new_items
+
+ for t_name, target in target_list:
+ if sourced:
+ def remerge_visit(path, key, value):
+ source_map[path + (key,)] = t_name
+ return True
+ else:
+ remerge_visit = default_visit
+
+ ret = remap(target, enter=remerge_enter, visit=remerge_visit)
+
+ if not sourced:
+ return ret
+ return ret, source_map
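+
+# Illustrative sketch (not part of the original module), assuming two plain
+# dicts; later containers win on conflicting keys, nested dicts are merged
+# and lists are concatenated:
+# >>> defaults = {'db': {'host': 'localhost', 'port': 27017}, 'tags': ['a']}
+# >>> overrides = {'db': {'port': 27018}, 'tags': ['b']}
+# >>> remerge([defaults, overrides])
+# {'db': {'host': 'localhost', 'port': 27018}, 'tags': ['a', 'b']}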
diff --git a/app/utils/dict_naming_converter.py b/app/utils/dict_naming_converter.py
new file mode 100644
index 0000000..91fea2e
--- /dev/null
+++ b/app/utils/dict_naming_converter.py
@@ -0,0 +1,40 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from bson.objectid import ObjectId
+
+
+class DictNamingConverter:
+
+ # Convert a nested dictionary from one convention to another.
+ # Args:
+ # d (dict): dictionary (nested or not) to be converted.
+ # cf (func): convert function - takes the string in one convention,
+ # returns it in the other one.
+ # Returns:
+ # Dictionary with the new keys.
+ @staticmethod
+ def change_dict_naming_convention(d, cf):
+ new = {}
+ if not d:
+ return d
+ if isinstance(d, str):
+ return d
+ if isinstance(d, ObjectId):
+ return d
+ for k, v in d.items():
+ new_v = v
+ if isinstance(v, dict):
+ new_v = DictNamingConverter.change_dict_naming_convention(v, cf)
+ elif isinstance(v, list):
+ new_v = list()
+ for x in v:
+ new_v.append(DictNamingConverter.change_dict_naming_convention(x, cf))
+ new[cf(k)] = new_v
+ return new
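+
+# Illustrative sketch (not part of the original module), assuming a simple
+# camelCase-to-snake_case convert function:
+# >>> import re
+# >>> def camel_to_snake(s):
+# ...     return re.sub(r'(?<!^)(?=[A-Z])', '_', s).lower()
+# >>> DictNamingConverter.change_dict_naming_convention(
+# ...     {'hostName': 'node-6', 'portList': [{'portId': 1}]}, camel_to_snake)
+# {'host_name': 'node-6', 'port_list': [{'port_id': 1}]}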
diff --git a/app/utils/exceptions.py b/app/utils/exceptions.py
new file mode 100644
index 0000000..07e46dc
--- /dev/null
+++ b/app/utils/exceptions.py
@@ -0,0 +1,13 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+
+class ScanArgumentsError(ValueError):
+ pass
diff --git a/app/utils/inventory_mgr.py b/app/utils/inventory_mgr.py
new file mode 100644
index 0000000..2fe2894
--- /dev/null
+++ b/app/utils/inventory_mgr.py
@@ -0,0 +1,445 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from datetime import datetime
+
+import bson
+
+from utils.constants import EnvironmentFeatures
+from utils.logging.console_logger import ConsoleLogger
+from utils.mongo_access import MongoAccess
+from utils.singleton import Singleton
+
+
+def inv_initialization_required(func):
+ def decorated(self, *args, **kwargs):
+ if self.inventory_collection is None:
+ raise TypeError("Inventory collection is not set.")
+ return func(self, *args, **kwargs)
+ return decorated
+
+
+class InventoryMgr(MongoAccess, metaclass=Singleton):
+
+ def __init__(self):
+ super().__init__()
+ self.log = ConsoleLogger()
+ self.inventory_collection = None
+ self.inventory_collection_name = None
+ self.collections = {}
+ self.monitoring_setup_manager = None
+
+ def set_collection(self, collection_type: str = None,
+ use_default_name: bool = False):
+ # do not allow setting the collection more than once
+ if not self.collections.get(collection_type):
+ collection_name = collection_type \
+ if use_default_name \
+ else self.get_coll_name(collection_type)
+
+ self.log.info("Using {} collection: {}"
+ .format(collection_type, collection_name))
+
+ self.collections[collection_type] = MongoAccess.db[collection_name]
+
+ def set_inventory_collection(self, collection_name: str = None):
+ if not self.inventory_collection:
+ if not collection_name:
+ collection_name = "inventory"
+
+ self.log.info("Using inventory collection: {}"
+ .format(collection_name))
+
+ collection = MongoAccess.db[collection_name]
+ self.collections["inventory"] = collection
+ self.inventory_collection = collection
+ self.inventory_collection_name = collection_name
+
+ def get_coll_name(self, coll_name):
+ if not self.inventory_collection_name:
+ raise TypeError("inventory_collection_name is not set")
+
+ return self.inventory_collection_name.replace("inventory", coll_name) \
+ if self.inventory_collection_name.startswith("inventory") \
+ else self.inventory_collection_name + "_" + coll_name
+
+ def set_collections(self, inventory_collection: str = None):
+ self.set_inventory_collection(inventory_collection)
+ self.set_collection("links")
+ self.set_collection("link_types")
+ self.set_collection("clique_types")
+ self.set_collection("clique_constraints")
+ self.set_collection("cliques")
+ self.set_collection("monitoring_config")
+ self.set_collection("constants", use_default_name=True)
+ self.set_collection("scans")
+ self.set_collection("messages")
+ self.set_collection("monitoring_config_templates",
+ use_default_name=True)
+ self.set_collection("environments_config")
+ self.set_collection("supported_environments")
+
+ def clear(self, scan_plan):
+ if scan_plan.inventory_only:
+ collections = {"inventory"}
+ elif scan_plan.links_only:
+ collections = {"links"}
+ elif scan_plan.cliques_only:
+ collections = {"cliques"}
+ else:
+ collections = {"inventory", "links", "cliques", "monitoring_config"}
+
+ env_cond = {} if scan_plan.clear_all else {"environment": scan_plan.env}
+
+ for collection_name in collections:
+ collection = self.collections[collection_name]
+ self.log.info("clearing collection: " + collection.full_name)
+ # delete docs from the collection,
+ # either all or just for the specified environment
+ collection.delete_many(env_cond)
+
+ # return single match
+ def get_by_id(self, environment, item_id):
+ return self.find({
+ "environment": environment,
+ "id": item_id
+ }, get_single=True)
+
+ # return matches for ID in list of values
+ def get_by_ids(self, environment, ids_list):
+ return self.find({
+ "environment": environment,
+ "id": {"$in": ids_list}
+ })
+
+ def get_by_field(self, environment, item_type, field_name, field_value,
+ get_single=False):
+ if field_value:
+ return self.find({"environment": environment,
+ "type": item_type,
+ field_name: field_value},
+ get_single=get_single)
+ else:
+ return self.find({"environment": environment,
+ "type": item_type},
+ get_single=get_single)
+
+ def get(self, environment, item_type, item_id, get_single=False):
+ return self.get_by_field(environment, item_type, "id", item_id,
+ get_single=get_single)
+
+ def get_children(self, environment, item_type, parent_id):
+ if parent_id:
+ if not item_type:
+ return self.find({"environment": environment,
+ "parent_id": parent_id})
+ else:
+ return self.find({"environment": environment,
+ "type": item_type,
+ "parent_id": parent_id})
+ else:
+ return self.find({"environment": environment,
+ "type": item_type})
+
+ def get_single(self, environment, item_type, item_id):
+ matches = self.find({"environment": environment,
+ "type": item_type,
+ "id": item_id})
+ if len(matches) > 1:
+ raise ValueError("Found multiple matches for item: " +
+ "type=" + item_type + ", id=" + item_id)
+ if len(matches) == 0:
+ raise ValueError("No matches for item: " +
+ "type=" + item_type + ", id=" + item_id)
+ return matches[0]
+
+ # item must contain properties 'environment', 'type' and 'id'
+ def set(self, item, collection=None):
+ col = collection
+ mongo_id = None
+ projects = None
+ if "_id" in item:
+ mongo_id = item.pop("_id", None)
+
+ if not collection or collection == self.collections['inventory']:
+ # make sure we have environment, type & id
+ self.check(item, "environment")
+ self.check(item, "type")
+ self.check(item, "id")
+
+ item["last_scanned"] = datetime.now()
+ item.pop("projects", [])
+
+ obj_name = item["name_path"]
+ obj_name = obj_name[obj_name.rindex('/') + 1:]
+
+ if 'object_name' not in item:
+ item['object_name'] = obj_name
+
+ self.set_collections() # make sure we have all collections set
+ if not col:
+ col = self.collections['inventory']
+
+ find_tuple = {"environment": item["environment"],
+ "type": item["type"], "id": item["id"]}
+ else:
+ find_tuple = {'_id': bson.ObjectId(mongo_id)}
+ doc = col.find_one(find_tuple)
+ if not doc:
+ raise ValueError('set(): could not find document with _id=' +
+ mongo_id)
+
+ col.update_one(find_tuple,
+ {'$set': self.encode_mongo_keys(item)},
+ upsert=True)
+ if mongo_id:
+ # restore original mongo ID of document, in case we need to use it
+ item['_id'] = mongo_id
+ if projects:
+ col.update_one(find_tuple,
+ {'$addToSet': {"projects": {'$each': projects}}},
+ upsert=True)
+
+ @staticmethod
+ def check(obj, field_name):
+ arg = obj[field_name]
+ if not arg or not str(arg).rstrip():
+ raise ValueError("Inventory item - " +
+ "the following field is not defined: " +
+ field_name)
+
+ # note: to use general find, call find_items(),
+ # which also does process_results
+ @inv_initialization_required
+ def find(self, search, projection=None, collection=None, get_single=False):
+ coll = self.inventory_collection if not collection \
+ else self.collections[collection]
+ if get_single is True:
+ return self.decode_object_id(
+ self.decode_mongo_keys(
+ coll.find_one(search, projection=projection)
+ )
+ )
+ else:
+ return list(
+ map(
+ self.decode_object_id,
+ map(
+ self.decode_mongo_keys,
+ coll.find(search, projection=projection))
+ )
+ )
+
+ def find_one(self, search, projection=None, collection=None) -> dict:
+ return self.find(search, projection, collection, True)
+
+ def find_items(self, search,
+ projection=None,
+ get_single=False,
+ collection=None):
+ return self.find(search, projection, collection, get_single)
+
+ # record a link between objects in the inventory, to be used in graphs
+ # returns - the new link document
+ # parameters -
+ # environment: name of environment
+ # host: name of host
+ # source: node mongo _id
+ # source_id: node id value of source node
+ # target: node mongo _id
+ # target_id: node id value of target node
+ # link_type: string showing types of connected objects, e.g. "instance-vnic"
+ # link_name: label for the link itself
+ # state: up/down
+ # link_weight: integer, position/priority for graph placement
+ # source_label, target_label: labels for the ends of the link (optional)
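+ # Illustrative call (a sketch, not from the original source; the _id and
+ # object id values are hypothetical):
+ #   inv.create_link(env='Mirantis-Liberty', host='node-6.cisco.com',
+ #                   src='<instance mongo _id>', source_id='instance1',
+ #                   target='<vnic mongo _id>', target_id='vnic1',
+ #                   link_type='instance-vnic', link_name='local_name',
+ #                   state='up', link_weight=0)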
+ def create_link(self, env, host, src, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ source_label="", target_label="",
+ extra_attributes=None):
+ s = bson.ObjectId(src)
+ t = bson.ObjectId(target)
+ link = {
+ "environment": env,
+ "host": host,
+ "source": s,
+ "source_id": source_id,
+ "target": t,
+ "target_id": target_id,
+ "link_type": link_type,
+ "link_name": link_name,
+ "state": state,
+ "link_weight": link_weight,
+ "source_label": source_label,
+ "target_label": target_label,
+ "attributes": extra_attributes if extra_attributes else {}
+ }
+ return self.write_link(link)
+
+ def write_link(self, link):
+ find_tuple = {
+ 'environment': link['environment'],
+ 'source_id': link['source_id'],
+ 'target_id': link['target_id']
+ }
+ if "_id" in link:
+ link.pop("_id", None)
+ link_encoded = self.encode_mongo_keys(link)
+ links_col = self.collections["links"]
+ result = links_col.update_one(find_tuple, {'$set': link_encoded},
+ upsert=True)
+ link['_id'] = result.upserted_id
+ return link
+
+ def values_replace_in_object(self, o, values_replacement):
+ for k in values_replacement.keys():
+ if k not in o:
+ continue
+ repl = values_replacement[k]
+ if 'from' not in repl or 'to' not in repl:
+ continue
+ o[k] = o[k].replace(repl['from'], repl['to'])
+ self.set(o)
+
+ # perform replacement of substring in values of objects in the inventory
+ # input:
+ # - search: dict with search parameters
+ # - values_replacement: dict,
+ # - keys: names of keys for which to replace the values
+ # - values: dict with "from" (value to be replaced) and "to" (new value)
+ @inv_initialization_required
+ def values_replace(self, search, values_replacement):
+ for doc in self.inventory_collection.find(search):
+ self.values_replace_in_object(doc, values_replacement)
+
+ def delete(self, coll, query_filter):
+ collection = self.collections[coll]
+ if not collection:
+ self.log.warning('delete(): collection not found - ' + coll)
+ return
+ result = collection.delete_many(query_filter)
+ count = result.deleted_count
+ self.log.info('delete(): ' + ('deleted ' + str(count) + ' documents'
+ if count else 'no matching documents'))
+ return count
+
+ def get_env_config(self, env: str):
+ return self.find_one(search={'name': env},
+ collection='environments_config')
+
+ def is_feature_supported(self, env: str, feature: EnvironmentFeatures)\
+ -> bool:
+ env_config = self.get_env_config(env)
+ if not env_config:
+ return False
+
+ # Workaround for mechanism_drivers field type
+ mechanism_driver = env_config['mechanism_drivers'][0] \
+ if isinstance(env_config['mechanism_drivers'], list) \
+ else env_config['mechanism_drivers']
+
+ full_env = {'environment.distribution': env_config['distribution'],
+ 'environment.type_drivers': env_config['type_drivers'],
+ 'environment.mechanism_drivers': mechanism_driver}
+ return self.is_feature_supported_in_env(full_env, feature)
+
+ def is_feature_supported_in_env(self, env_def: dict,
+ feature: EnvironmentFeatures) -> bool:
+
+ result = self.collections['supported_environments'].find_one(env_def)
+ if not result:
+ return False
+ features_in_env = result.get('features', {})
+ return features_in_env.get(feature.value) is True
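+ # Illustrative shape of a 'supported_environments' document (hypothetical
+ # values): {'environment': {'distribution': 'Mirantis-8.0',
+ #                           'type_drivers': 'vxlan',
+ #                           'mechanism_drivers': 'OVS'},
+ #           'features': {'scanning': True, 'monitoring': False,
+ #                        'listening': True}}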
+
+ def save_inventory_object(self, o: dict, parent: dict,
+ environment: str, type_to_fetch: dict = None) -> bool:
+ if not type_to_fetch:
+ type_to_fetch = {}
+
+ o["id"] = str(o["id"])
+ o["environment"] = environment
+ if type_to_fetch.get("type"):
+ o["type"] = type_to_fetch["type"]
+ o["show_in_tree"] = type_to_fetch.get("show_in_tree", True)
+
+ parent_id_path = parent.get("id_path", "/{}".format(environment))
+ parent_name_path = parent.get("name_path", "/{}".format(environment))
+
+ try:
+ # case of dynamic folder added by need
+ master_parent_type = o["master_parent_type"]
+ master_parent_id = o["master_parent_id"]
+ master_parent = self.get_by_id(environment, master_parent_id)
+ if not master_parent:
+ self.log.error("failed to find master parent " +
+ master_parent_id)
+ return False
+ folder_id_path = "/".join((master_parent["id_path"], o["parent_id"]))
+ folder_name_path = "/".join((master_parent["name_path"], o["parent_text"]))
+ folder = {
+ "environment": parent["environment"],
+ "parent_id": master_parent_id,
+ "parent_type": master_parent_type,
+ "id": o["parent_id"],
+ "id_path": folder_id_path,
+ "show_in_tree": True,
+ "name_path": folder_name_path,
+ "name": o["parent_id"],
+ "type": o["parent_type"],
+ "text": o["parent_text"]
+ }
+ # remove master_parent_type & master_parent_id after use,
+ # as they're there just to help create the dynamic folder
+ o.pop("master_parent_type", True)
+ o.pop("master_parent_id", True)
+ self.set(folder)
+ except KeyError:
+ pass
+
+ if o.get("text"):
+ o["name"] = o["text"]
+ elif not o.get("name"):
+ o["name"] = o["id"]
+
+ if "parent_id" not in o and parent:
+ parent_id = parent["id"]
+ o["parent_id"] = parent_id
+ o["parent_type"] = parent["type"]
+ elif "parent_id" in o and o["parent_id"] != parent["id"]:
+ # using alternate parent - fetch parent path from inventory
+ parent_obj = self.get_by_id(environment, o["parent_id"])
+ if parent_obj:
+ parent_id_path = parent_obj["id_path"]
+ parent_name_path = parent_obj["name_path"]
+ o["id_path"] = "/".join((parent_id_path, o["id"].strip()))
+ o["name_path"] = "/".join((parent_name_path, o["name"]))
+
+ # keep list of projects that an object is in
+ associated_projects = []
+ keys_to_remove = []
+ for k in o:
+ if k.startswith("in_project-"):
+ proj_name = k[k.index('-') + 1:]
+ associated_projects.append(proj_name)
+ keys_to_remove.append(k)
+ for k in keys_to_remove:
+ o.pop(k)
+ if len(associated_projects) > 0:
+ projects = o["projects"] if "projects" in o.keys() else []
+ projects.extend(associated_projects)
+ if projects:
+ o["projects"] = projects
+
+ if "create_object" not in o or o["create_object"]:
+ # add/update object in DB
+ self.set(o)
+ if self.is_feature_supported(environment, EnvironmentFeatures.MONITORING):
+ self.monitoring_setup_manager.create_setup(o)
+ return True
diff --git a/app/utils/logging/__init__.py b/app/utils/logging/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/utils/logging/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/utils/logging/console_logger.py b/app/utils/logging/console_logger.py
new file mode 100644
index 0000000..bb8b2ed
--- /dev/null
+++ b/app/utils/logging/console_logger.py
@@ -0,0 +1,21 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import logging
+
+from utils.logging.logger import Logger
+
+
+class ConsoleLogger(Logger):
+
+ def __init__(self, level: str = Logger.default_level):
+ super().__init__(logger_name="{}-Console".format(self.PROJECT_NAME),
+ level=level)
+ self.add_handler(logging.StreamHandler())
+
diff --git a/app/utils/logging/file_logger.py b/app/utils/logging/file_logger.py
new file mode 100644
index 0000000..e205bc3
--- /dev/null
+++ b/app/utils/logging/file_logger.py
@@ -0,0 +1,23 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import logging.handlers
+
+from utils.logging.logger import Logger
+
+
+class FileLogger(Logger):
+
+ LOG_DIRECTORY = "/local_dir/log/calipso/"
+
+ def __init__(self, log_file: str, level: str = Logger.default_level):
+ super().__init__(logger_name="{}-File".format(self.PROJECT_NAME),
+ level=level)
+ self.add_handler(logging.handlers.WatchedFileHandler(log_file))
+
diff --git a/app/utils/logging/full_logger.py b/app/utils/logging/full_logger.py
new file mode 100644
index 0000000..a88f00e
--- /dev/null
+++ b/app/utils/logging/full_logger.py
@@ -0,0 +1,47 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import logging
+import logging.handlers
+
+from utils.logging.logger import Logger
+from utils.logging.mongo_logging_handler import MongoLoggingHandler
+
+
+class FullLogger(Logger):
+
+ def __init__(self, env: str = None, log_file: str = None,
+ level: str = Logger.default_level):
+ super().__init__(logger_name="{}-Full".format(self.PROJECT_NAME),
+ level=level)
+
+ # Console handler
+ self.add_handler(logging.StreamHandler())
+
+ # Message handler
+ self.add_handler(MongoLoggingHandler(env, self.level))
+
+ # File handler
+ if log_file:
+ self.add_handler(logging.handlers.WatchedFileHandler(log_file))
+
+    # Make sure the MongoLoggingHandler is updated with the new env
+    def set_env(self, env):
+        super().set_env(env)
+
+        defined_handler = next(
+            filter(
+                lambda handler: isinstance(handler, MongoLoggingHandler),
+                self.log.handlers
+            ), None)
+
+        if defined_handler:
+            defined_handler.env = env
+        else:
+            self.add_handler(MongoLoggingHandler(env, self.level))
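A minimal usage sketch for FullLogger, assuming MongoDB is reachable so the MongoLoggingHandler can obtain an InventoryMgr; the environment names and log file path below are illustrative only:

    from utils.logging.full_logger import FullLogger

    logger = FullLogger(env='Mirantis-Liberty', log_file='/tmp/calipso.log')
    logger.info('environment scan started')
    # switching environments re-points the existing MongoLoggingHandler
    logger.set_env('RDO-Mitaka')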
diff --git a/app/utils/logging/logger.py b/app/utils/logging/logger.py
new file mode 100644
index 0000000..bcf8287
--- /dev/null
+++ b/app/utils/logging/logger.py
@@ -0,0 +1,99 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import logging
+from abc import ABC
+
+
+class Logger(ABC):
+ DEBUG = 'DEBUG'
+ INFO = 'INFO'
+ WARNING = 'WARNING'
+ ERROR = 'ERROR'
+ CRITICAL = 'CRITICAL'
+
+ PROJECT_NAME = 'CALIPSO'
+
+ levels = [DEBUG, INFO, WARNING, ERROR, CRITICAL]
+ log_format = '%(asctime)s %(levelname)s: %(message)s'
+ formatter = logging.Formatter(log_format)
+ default_level = INFO
+
+ def __init__(self, logger_name: str = PROJECT_NAME,
+ level: str = default_level):
+ super().__init__()
+ self.check_level(level)
+ self.log = logging.getLogger(logger_name)
+        logging.basicConfig(format=self.log_format,
+                            level=level.upper())
+        self.log.propagate = False
+        self.set_loglevel(level)
+        self.env = None
+        self.level = level.upper()
+
+ def set_env(self, env):
+ self.env = env
+
+ @staticmethod
+ def check_level(level):
+ if level.upper() not in Logger.levels:
+ raise ValueError('Invalid log level: {}. Supported levels: ({})'
+ .format(level, ", ".join(Logger.levels)))
+
+ @staticmethod
+ def get_numeric_level(loglevel):
+ Logger.check_level(loglevel)
+ numeric_level = getattr(logging, loglevel.upper(), Logger.default_level)
+ if not isinstance(numeric_level, int):
+ raise ValueError('Invalid log level: {}'.format(loglevel))
+ return numeric_level
+
+ def set_loglevel(self, loglevel):
+ # assuming loglevel is bound to the string value obtained from the
+ # command line argument. Convert to upper case to allow the user to
+ # specify --log=DEBUG or --log=debug
+ numeric_level = self.get_numeric_level(loglevel)
+
+ for handler in self.log.handlers:
+ handler.setLevel(numeric_level)
+ self.log.setLevel(numeric_level)
+        self.level = loglevel.upper()
+
+ def _log(self, level, message, *args, exc_info=False, **kwargs):
+ self.log.log(level, message, *args, exc_info=exc_info, **kwargs)
+
+ def debug(self, message, *args, **kwargs):
+ self._log(logging.DEBUG, message, *args, **kwargs)
+
+ def info(self, message, *args, **kwargs):
+ self._log(logging.INFO, message, *args, **kwargs)
+
+ def warning(self, message, *args, **kwargs):
+ self._log(logging.WARNING, message, *args, **kwargs)
+
+ def warn(self, message, *args, **kwargs):
+ self.warning(message, *args, **kwargs)
+
+ def error(self, message, *args, **kwargs):
+ self._log(logging.ERROR, message, *args, **kwargs)
+
+ def exception(self, message, *args, **kwargs):
+        self._log(logging.ERROR, message, *args, exc_info=True, **kwargs)
+
+ def critical(self, message, *args, **kwargs):
+ self._log(logging.CRITICAL, message, *args, **kwargs)
+
+ def add_handler(self, handler):
+ handler_defined = handler.__class__ in map(lambda h: h.__class__,
+ self.log.handlers)
+
+ if not handler_defined:
+ handler.setLevel(self.level)
+ handler.setFormatter(self.formatter)
+ self.log.addHandler(handler)
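For reference, a short sketch of how the level handling above behaves; the node name is a made-up example:

    from utils.logging.console_logger import ConsoleLogger
    from utils.logging.logger import Logger

    logger = ConsoleLogger(level='debug')   # lower case is accepted by check_level()
    logger.set_loglevel(Logger.WARNING)     # raises ValueError for unknown levels
    logger.warning('disk usage high on %s', 'node-5')
    Logger.get_numeric_level('INFO')        # -> logging.INFO (20)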
diff --git a/app/utils/logging/message_logger.py b/app/utils/logging/message_logger.py
new file mode 100644
index 0000000..02e098f
--- /dev/null
+++ b/app/utils/logging/message_logger.py
@@ -0,0 +1,21 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import logging
+
+from utils.logging.logger import Logger
+from utils.logging.mongo_logging_handler import MongoLoggingHandler
+
+
+class MessageLogger(Logger):
+
+    def __init__(self, env: str = None, level: str = Logger.default_level):
+ super().__init__(logger_name="{}-Message".format(self.PROJECT_NAME),
+ level=level)
+ self.add_handler(MongoLoggingHandler(env, self.level))
diff --git a/app/utils/logging/mongo_logging_handler.py b/app/utils/logging/mongo_logging_handler.py
new file mode 100644
index 0000000..b69270e
--- /dev/null
+++ b/app/utils/logging/mongo_logging_handler.py
@@ -0,0 +1,53 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import datetime
+import logging
+
+from messages.message import Message
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.logger import Logger
+from utils.string_utils import stringify_datetime
+
+
+class MongoLoggingHandler(logging.Handler):
+ """
+ Logging handler for MongoDB
+ """
+ SOURCE_SYSTEM = 'Calipso'
+
+ def __init__(self, env: str, level: str):
+ super().__init__(Logger.get_numeric_level(level))
+ self.str_level = level
+ self.env = env
+ self.inv = None
+
+ def emit(self, record):
+ # Try to invoke InventoryMgr for logging
+ if not self.inv:
+ try:
+ self.inv = InventoryMgr()
+            except Exception:
+ return
+
+ # make sure we do not try to log to DB when DB is not ready
+ if not (self.inv.is_db_ready()
+ and 'messages' in self.inv.collections):
+ return
+
+ # make ID from current timestamp
+ now = datetime.datetime.utcnow()
+ d = now - datetime.datetime(1970, 1, 1)
+ ts = stringify_datetime(now)
+ timestamp_id = '{}.{}.{}'.format(d.days, d.seconds, d.microseconds)
+ source = self.SOURCE_SYSTEM
+ message = Message(msg_id=timestamp_id, env=self.env, source=source,
+ msg=Logger.formatter.format(record), ts=ts,
+ level=record.levelname)
+        self.inv.collections['messages'].insert_one(message.get())
\ No newline at end of file
diff --git a/app/utils/metadata_parser.py b/app/utils/metadata_parser.py
new file mode 100644
index 0000000..1ed49ab
--- /dev/null
+++ b/app/utils/metadata_parser.py
@@ -0,0 +1,83 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+import os
+from abc import abstractmethod, ABCMeta
+
+from utils.util import get_extension
+
+
+class MetadataParser(metaclass=ABCMeta):
+
+ def __init__(self):
+ super().__init__()
+ self.errors = []
+
+ @abstractmethod
+ def get_required_fields(self) -> list:
+ pass
+
+ def validate_metadata(self, metadata: dict) -> bool:
+ if not isinstance(metadata, dict):
+ raise ValueError('metadata needs to be a valid dict')
+
+ # make sure metadata json contains all fields we need
+ required_fields = self.get_required_fields()
+ if not all([field in metadata for field in required_fields]):
+ raise ValueError("Metadata json should contain "
+ "all the following fields: {}"
+ .format(', '.join(required_fields)))
+ return True
+
+ @staticmethod
+ def _load_json_file(file_path: str):
+ with open(file_path) as data_file:
+ return json.load(data_file)
+
+ def _parse_json_file(self, file_path: str):
+ metadata = self._load_json_file(file_path)
+
+ # validate metadata correctness
+ if not self.validate_metadata(metadata):
+ return None
+
+ return metadata
+
+ @staticmethod
+ def check_metadata_file_ok(file_path: str):
+ extension = get_extension(file_path)
+ if extension != 'json':
+ raise ValueError("Extension '{}' is not supported. "
+ "Please provide a .json metadata file."
+ .format(extension))
+
+ if not os.path.isfile(file_path):
+ raise ValueError("Couldn't load metadata file. "
+ "Path '{}' doesn't exist or is not a file"
+ .format(file_path))
+
+ def parse_metadata_file(self, file_path: str) -> dict:
+ # reset errors in case same parser is used to read multiple inputs
+ self.errors = []
+ self.check_metadata_file_ok(file_path)
+
+ # Try to parse metadata file if it has one of the supported extensions
+ metadata = self._parse_json_file(file_path)
+ self.check_errors()
+ return metadata
+
+ def check_errors(self):
+ if self.errors:
+ raise ValueError("Errors encountered during "
+ "metadata file parsing:\n{}"
+ .format("\n".join(self.errors)))
+
+ def add_error(self, msg):
+ self.errors.append(msg)
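A hypothetical subclass sketch showing how MetadataParser is meant to be used; the required field names and the file path are illustrative, not taken from the actual scanners metadata:

    from utils.metadata_parser import MetadataParser

    class ScannersParserExample(MetadataParser):
        def get_required_fields(self) -> list:
            return ['scanners_package', 'scanners']    # assumed field names

    parser = ScannersParserExample()
    metadata = parser.parse_metadata_file('/local_dir/scanners.json')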
diff --git a/app/utils/mongo_access.py b/app/utils/mongo_access.py
new file mode 100644
index 0000000..1425017
--- /dev/null
+++ b/app/utils/mongo_access.py
@@ -0,0 +1,137 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import os
+
+from pymongo import MongoClient
+
+from utils.config_file import ConfigFile
+from utils.dict_naming_converter import DictNamingConverter
+from utils.logging.console_logger import ConsoleLogger
+from utils.logging.file_logger import FileLogger
+
+
+# Provides access to MongoDB using the PyMongo library
+#
+# Notes on authentication:
+# - the default config file is calipso_mongo_access.conf
+# - a different config file can be specified from the CLI with --mongo_config
+
+
+class MongoAccess(DictNamingConverter):
+ client = None
+ db = None
+ default_conf_file = '/local_dir/calipso_mongo_access.conf'
+ config_file = None
+
+ DB_NAME = 'calipso'
+ LOG_FILENAME = 'mongo_access.log'
+ DEFAULT_LOG_FILE = os.path.join(os.path.abspath("."), LOG_FILENAME)
+
+ def __init__(self):
+ super().__init__()
+ self.log_file = os.path.join(FileLogger.LOG_DIRECTORY,
+ MongoAccess.LOG_FILENAME)
+
+ try:
+ self.log = FileLogger(self.log_file)
+ except OSError as e:
+ ConsoleLogger().warning("Couldn't use file {} for logging. "
+ "Using default location: {}.\n"
+ "Error: {}"
+ .format(self.log_file,
+ self.DEFAULT_LOG_FILE,
+ e))
+
+ self.log_file = self.DEFAULT_LOG_FILE
+ self.log = FileLogger(self.log_file)
+
+ self.connect_params = {}
+ self.mongo_connect(self.config_file)
+
+ def is_db_ready(self) -> bool:
+ return MongoAccess.client is not None
+
+ @staticmethod
+ def set_config_file(_conf_file):
+ MongoAccess.config_file = _conf_file
+
+ def mongo_connect(self, config_file_path=""):
+ if MongoAccess.client:
+ return
+
+ self.connect_params = {
+ "server": "localhost",
+ "port": 27017
+ }
+
+ if not config_file_path:
+ config_file_path = self.default_conf_file
+
+ try:
+ config_file = ConfigFile(config_file_path)
+ # read connection parameters from config file
+ config_params = config_file.read_config()
+ self.connect_params.update(config_params)
+ except Exception as e:
+ self.log.exception(e)
+ raise
+
+ self.prepare_connect_uri()
+ MongoAccess.client = MongoClient(
+ self.connect_params["server"],
+ self.connect_params["port"]
+ )
+ MongoAccess.db = getattr(MongoAccess.client,
+ config_params.get('auth_db', self.DB_NAME))
+ self.log.info('Connected to MongoDB')
+
+ def prepare_connect_uri(self):
+ params = self.connect_params
+ self.log.debug('connecting to MongoDb server: {}'
+ .format(params['server']))
+ uri = 'mongodb://'
+ if 'password' in params:
+ uri = uri + params['user'] + ':' + params['password'] + '@'
+ uri = uri + params['server']
+ if 'auth_db' in params:
+ uri = uri + '/' + params['auth_db']
+ self.connect_params['server'] = uri
+
+ @staticmethod
+ def update_document(collection, document, upsert=False):
+ if isinstance(collection, str):
+ collection = MongoAccess.db[collection]
+ doc_id = document.pop('_id')
+ collection.update_one({'_id': doc_id}, {'$set': document},
+ upsert=upsert)
+ document['_id'] = doc_id
+
+ @staticmethod
+ def encode_dots(s):
+ return s.replace(".", "[dot]")
+
+ @staticmethod
+ def decode_dots(s):
+ return s.replace("[dot]", ".")
+
+    # Mongo does not accept a dot (".") in keys, nor keys starting with "$".
+    # Keys starting with "$" do not occur in OpenStack data,
+    # so only "." --> "[dot]" needs to be translated
+    @staticmethod
+    def encode_mongo_keys(item):
+        return MongoAccess.change_dict_naming_convention(
+            item, MongoAccess.encode_dots)
+
+    @staticmethod
+    def decode_mongo_keys(item):
+        return MongoAccess.change_dict_naming_convention(
+            item, MongoAccess.decode_dots)
+
+    @staticmethod
+    def decode_object_id(item: dict):
+        return dict(item, **{"_id": str(item["_id"])}) \
+            if item and "_id" in item else item
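Sketch of the key encoding helpers above, assuming DictNamingConverter.change_dict_naming_convention applies the given function to every key of the nested dict:

    from utils.mongo_access import MongoAccess

    doc = {'config': {'dhcp_agent.ini': {'debug': 'True'}}}
    safe = MongoAccess.encode_mongo_keys(doc)
    # -> {'config': {'dhcp_agent[dot]ini': {'debug': 'True'}}}
    assert MongoAccess.decode_mongo_keys(safe) == doc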
diff --git a/app/utils/singleton.py b/app/utils/singleton.py
new file mode 100644
index 0000000..fc1147f
--- /dev/null
+++ b/app/utils/singleton.py
@@ -0,0 +1,16 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+class Singleton(type):
+ _instances = {}
+
+ def __call__(cls, *args, **kwargs):
+ if cls not in cls._instances:
+ cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
+ return cls._instances[cls]
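Usage sketch for the Singleton metaclass; the Registry class here is a made-up example, not a class from this patch:

    from utils.singleton import Singleton

    class Registry(metaclass=Singleton):
        def __init__(self):
            self.items = {}

    # every call returns the same instance; __init__ runs only once
    assert Registry() is Registry()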
diff --git a/app/utils/special_char_converter.py b/app/utils/special_char_converter.py
new file mode 100644
index 0000000..fb469bb
--- /dev/null
+++ b/app/utils/special_char_converter.py
@@ -0,0 +1,32 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+
+class SpecialCharConverter:
+
+ translated_re = re.compile(r'---[.][.][0-9]+[.][.]---')
+
+ def encode_special_characters(self, s):
+ SPECIAL_CHARS = [':', '/']
+ for c in SPECIAL_CHARS:
+ if c in s:
+ s = s.replace(c, '---..' + str(ord(c)) + '..---')
+ return s
+
+ def decode_special_characters(self, s):
+ replaced = []
+ for m in re.finditer(self.translated_re, s):
+ match = m.group(0)
+ char_code = match[5:len(match)-5]
+ if char_code not in replaced:
+ replaced.append(char_code)
+ s = s.replace(match, chr(int(char_code)))
+ return s
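Round-trip sketch for the converter above (the interface id is illustrative):

    from utils.special_char_converter import SpecialCharConverter

    converter = SpecialCharConverter()
    encoded = converter.encode_special_characters('eth0:10.0.0.1/24')
    # -> 'eth0---..58..---10.0.0.1---..47..---24'
    assert converter.decode_special_characters(encoded) == 'eth0:10.0.0.1/24'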
diff --git a/app/utils/ssh_conn.py b/app/utils/ssh_conn.py
new file mode 100644
index 0000000..d4b7954
--- /dev/null
+++ b/app/utils/ssh_conn.py
@@ -0,0 +1,94 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import os
+
+from discover.configuration import Configuration
+from utils.inventory_mgr import InventoryMgr
+from utils.ssh_connection import SshConnection
+
+
+class SshConn(SshConnection):
+ config = None
+ ssh = None
+ connections = {}
+
+ max_call_count_per_con = 100
+ timeout = 15 # timeout for exec in seconds
+
+ def __init__(self, host_name, for_sftp=False):
+ self.config = Configuration()
+ self.env_config = self.config.get_env_config()
+ self.env = self.env_config['name']
+ self.conf = self.config.get('CLI')
+ self.gateway = None
+ self.host = None
+ self.host_conf = self.get_host_conf(host_name)
+ self.ssh = None
+ self.ftp = None
+ self.for_sftp = for_sftp
+ self.key = None
+ self.port = None
+ self.user = None
+ self.pwd = None
+ self.check_definitions()
+ super().__init__(self.host, self.user, _pwd=self.pwd, _key=self.key,
+ _port=self.port, for_sftp=for_sftp)
+ self.inv = InventoryMgr()
+ if host_name in self.connections and not self.ssh:
+ self.ssh = self.connections[host_name]
+
+ def get_host_conf(self, host_name):
+ if 'hosts' in self.conf:
+ if not host_name:
+ raise ValueError('SshConn(): host must be specified ' +
+ 'if multi-host CLI config is used')
+ if host_name not in self.conf['hosts']:
+ raise ValueError('host details missing: ' + host_name)
+ return self.conf['hosts'][host_name]
+ else:
+ return self.conf
+
+ def check_definitions(self):
+ try:
+ self.host = self.host_conf['host']
+ if self.host in self.connections:
+ self.ssh = self.connections[self.host]
+ except KeyError:
+ raise ValueError('Missing definition of host for CLI access')
+ try:
+ self.user = self.host_conf['user']
+ except KeyError:
+ raise ValueError('Missing definition of user for CLI access')
+ try:
+ self.key = self.host_conf['key']
+ if self.key and not os.path.exists(self.key):
+ raise ValueError('Key file not found: ' + self.key)
+ except KeyError:
+ pass
+ try:
+ self.pwd = self.host_conf['pwd']
+ except KeyError:
+ self.pwd = None
+ if not self.key and not self.pwd:
+ raise ValueError('Must specify key or password for CLI access')
+
+ gateway_hosts = {}
+
+ @staticmethod
+ def get_gateway_host(host):
+ if not SshConn.gateway_hosts.get(host):
+ ssh = SshConn(host)
+ gateway = ssh.exec('uname -n')
+ SshConn.gateway_hosts[host] = gateway.strip()
+ return SshConn.gateway_hosts[host]
+
+ def is_gateway_host(self, host):
+ gateway_host = self.get_gateway_host(host)
+ return host == gateway_host
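Based on get_host_conf() and check_definitions() above, the 'CLI' section of the environment config is expected to take roughly one of these two shapes; the host names, addresses and credentials are illustrative only:

    # single-host CLI config
    cli_single = {'host': '10.0.0.10', 'user': 'root', 'key': '/local_dir/id_rsa'}

    # multi-host CLI config: SshConn(host_name) picks the matching entry
    cli_multi = {'hosts': {'controller-1': {'host': '10.0.0.10',
                                            'user': 'root',
                                            'pwd': 'secret'}}}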
diff --git a/app/utils/ssh_connection.py b/app/utils/ssh_connection.py
new file mode 100644
index 0000000..0fa197a
--- /dev/null
+++ b/app/utils/ssh_connection.py
@@ -0,0 +1,217 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import os
+
+import paramiko
+
+from utils.binary_converter import BinaryConverter
+
+
+class SshConnection(BinaryConverter):
+ config = None
+ ssh = None
+ connections = {}
+ cli_connections = {}
+ sftp_connections = {}
+
+ max_call_count_per_con = 100
+ timeout = 15 # timeout for exec in seconds
+
+ DEFAULT_PORT = 22
+
+    def __init__(self, _host: str, _user: str, _pwd: str = None,
+                 _key: str = None, _port: int = None,
+                 _call_count_limit: int = None, for_sftp: bool = False):
+ super().__init__()
+ self.host = _host
+ self.ssh = None
+ self.ftp = None
+ self.for_sftp = for_sftp
+ self.key = _key
+ self.port = _port
+ self.user = _user
+ self.pwd = _pwd
+ self.check_definitions()
+ self.fetched_host_details = False
+ self.call_count = 0
+ self.call_count_limit = 0 if for_sftp \
+ else (SshConnection.max_call_count_per_con
+ if _call_count_limit is None else _call_count_limit)
+ if for_sftp:
+ self.sftp_connections[_host] = self
+ else:
+ self.cli_connections[_host] = self
+
+ def check_definitions(self):
+ if not self.host:
+ raise ValueError('Missing definition of host for CLI access')
+ if not self.user:
+ raise ValueError('Missing definition of user ' +
+ 'for CLI access to host {}'.format(self.host))
+ if self.key and not os.path.exists(self.key):
+ raise ValueError('Key file not found: ' + self.key)
+ if not self.key and not self.pwd:
+ raise ValueError('Must specify key or password ' +
+ 'for CLI access to host {}'.format(self.host))
+
+ @staticmethod
+ def get_ssh(host, for_sftp=False):
+        if for_sftp:
+            return SshConnection.sftp_connections.get(host)
+        return SshConnection.cli_connections.get(host)
+
+ @staticmethod
+ def get_connection(host, for_sftp=False):
+ key = ('sftp-' if for_sftp else '') + host
+ return SshConnection.connections.get(key)
+
+ def disconnect(self):
+ if self.ssh:
+ self.ssh.close()
+
+ @staticmethod
+ def disconnect_all():
+ for ssh in SshConnection.cli_connections.values():
+ ssh.disconnect()
+ SshConnection.cli_connections = {}
+ for ssh in SshConnection.sftp_connections.values():
+ ssh.disconnect()
+ SshConnection.sftp_connections = {}
+
+ def get_host(self):
+ return self.host
+
+ def get_user(self):
+ return self.user
+
+ def set_call_limit(self, _limit: int):
+ self.call_count_limit = _limit
+
+ def connect(self, reconnect=False) -> bool:
+ connection = self.get_connection(self.host, self.for_sftp)
+ if connection:
+ self.ssh = connection
+ if reconnect:
+ self.log.info("SshConnection: " +
+ "****** forcing reconnect: %s ******",
+ self.host)
+ elif self.call_count >= self.call_count_limit > 0:
+ self.log.info("SshConnection: ****** reconnecting: %s, " +
+ "due to call count: %s ******",
+ self.host, self.call_count)
+ else:
+ return True
+ connection.close()
+ self.ssh = None
+ self.ssh = paramiko.SSHClient()
+ connection_key = ('sftp-' if self.for_sftp else '') + self.host
+ SshConnection.connections[connection_key] = self.ssh
+ self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ if self.key:
+ k = paramiko.RSAKey.from_private_key_file(self.key)
+ self.ssh.connect(hostname=self.host, username=self.user, pkey=k,
+ port=self.port if self.port is not None
+ else self.DEFAULT_PORT,
+ password=self.pwd, timeout=30)
+ else:
+ try:
+ port = self.port if self.port is not None else self.DEFAULT_PORT
+ self.ssh.connect(self.host,
+ username=self.user,
+ password=self.pwd,
+ port=port,
+ timeout=30)
+ except paramiko.ssh_exception.AuthenticationException:
+ self.log.error('Failed SSH connect to host {}, port={}'
+ .format(self.host, port))
+ self.ssh = None
+ self.call_count = 0
+ return self.ssh is not None
+
+ def exec(self, cmd):
+ if not self.connect():
+ return ''
+ self.call_count += 1
+ self.log.debug("call count: %s, running call:\n%s\n",
+ str(self.call_count), cmd)
+ stdin, stdout, stderr = self.ssh.exec_command(cmd, timeout=self.timeout)
+ stdin.close()
+ err = self.binary2str(stderr.read())
+ if err:
+ # ignore messages about loading plugin
+ err_lines = [l for l in err.splitlines()
+ if 'Loaded plugin: ' not in l]
+ if err_lines:
+ self.log.error("CLI access: \n" +
+ "Host: {}\nCommand: {}\nError: {}\n".
+ format(self.host, cmd, err))
+ stderr.close()
+ stdout.close()
+ return ""
+ ret = self.binary2str(stdout.read())
+ stderr.close()
+ stdout.close()
+ return ret
+
+ def copy_file(self, local_path, remote_path, mode=None):
+ if not self.connect():
+ return
+ if not self.ftp:
+ self.ftp = self.ssh.open_sftp()
+ try:
+ self.ftp.put(local_path, remote_path)
+ except IOError as e:
+ self.log.error('SFTP copy_file failed to copy file: ' +
+ 'local: ' + local_path +
+ ', remote host: ' + self.host +
+ ', error: ' + str(e))
+ return str(e)
+ try:
+ remote_file = self.ftp.file(remote_path, 'a+')
+ except IOError as e:
+ self.log.error('SFTP copy_file failed to open file after put(): ' +
+ 'local: ' + local_path +
+ ', remote host: ' + self.host +
+ ', error: ' + str(e))
+ return str(e)
+ try:
+ if mode:
+ remote_file.chmod(mode)
+ except IOError as e:
+ self.log.error('SFTP copy_file failed to chmod file: ' +
+ 'local: ' + local_path +
+ ', remote host: ' + self.host +
+                           ', port: ' + str(self.port) +
+ ', error: ' + str(e))
+ return str(e)
+ self.log.info('SFTP copy_file success: '
+ 'host={},port={},{} -> {}'.format(
+ str(self.host), str(self.port), str(local_path), str(remote_path)))
+ return ''
+
+ def copy_file_from_remote(self, remote_path, local_path):
+ if not self.connect():
+ return
+ if not self.ftp:
+ self.ftp = self.ssh.open_sftp()
+ try:
+ self.ftp.get(remote_path, local_path)
+ except IOError as e:
+ self.log.error('SFTP copy_file_from_remote failed to copy file: '
+ 'remote host: {}, '
+ 'remote_path: {}, local: {}, error: {}'
+ .format(self.host, remote_path, local_path, str(e)))
+ return str(e)
+ self.log.info('SFTP copy_file_from_remote success: host={},{} -> {}'.
+ format(self.host, remote_path, local_path))
+ return ''
+
+ def is_gateway_host(self, host):
+ return True
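A minimal usage sketch for SshConnection; the host, key path and file paths are made up:

    from utils.ssh_connection import SshConnection

    conn = SshConnection('10.0.0.10', 'root', _key='/local_dir/id_rsa')
    hostname = conn.exec('uname -n')        # returns '' if the connection failed
    err = conn.copy_file('/tmp/check.py', '/usr/bin/check.py', mode=0o755)
    conn.disconnect()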
diff --git a/app/utils/string_utils.py b/app/utils/string_utils.py
new file mode 100644
index 0000000..1f51992
--- /dev/null
+++ b/app/utils/string_utils.py
@@ -0,0 +1,59 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+from datetime import datetime
+
+from bson import ObjectId
+
+
+def jsonify(obj, prettify=False):
+ if prettify:
+ return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
+ else:
+ return json.dumps(obj)
+
+
+# stringify datetime object
+def stringify_datetime(dt):
+ return dt.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
+
+
+# stringify ObjectId
+def stringify_object_id(object_id):
+ return str(object_id)
+
+
+stringify_map = {
+ ObjectId: stringify_object_id,
+ datetime: stringify_datetime
+}
+
+
+def stringify_object_values_by_type(obj, object_type):
+ if isinstance(obj, dict):
+ for key, value in obj.items():
+ if isinstance(value, object_type):
+ obj[key] = stringify_map[object_type](value)
+ else:
+ stringify_object_values_by_type(value, object_type)
+ elif isinstance(obj, list):
+ for index, value in enumerate(obj):
+ if isinstance(value, object_type):
+ obj[index] = stringify_map[object_type](value)
+ else:
+ stringify_object_values_by_type(value, object_type)
+
+
+# convert some values of the specific types of the object into string
+# e.g convert all the ObjectId to string
+# convert all the datetime object to string
+def stringify_object_values_by_types(obj, object_types):
+ for object_type in object_types:
+ stringify_object_values_by_type(obj, object_type)
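Sketch of the stringify helpers above, preparing a Mongo document for jsonify(); the field names are illustrative:

    from datetime import datetime
    from bson import ObjectId
    from utils.string_utils import jsonify, stringify_object_values_by_types

    doc = {'_id': ObjectId(), 'last_scanned': datetime.utcnow(),
           'ports': [{'created': datetime.utcnow()}]}
    stringify_object_values_by_types(doc, [ObjectId, datetime])
    jsonify(doc)    # safe now: all ObjectId/datetime values are plain strings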
diff --git a/app/utils/util.py b/app/utils/util.py
new file mode 100644
index 0000000..4695879
--- /dev/null
+++ b/app/utils/util.py
@@ -0,0 +1,172 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import importlib
+import signal
+from argparse import Namespace
+from typing import Dict, Callable
+
+import os
+import re
+
+from bson.objectid import ObjectId
+
+
+class SignalHandler:
+
+ def __init__(self, signals=(signal.SIGTERM, signal.SIGINT)):
+ super().__init__()
+ self.terminated = False
+ for sig in signals:
+ signal.signal(sig, self.handle)
+
+ def handle(self, signum, frame):
+ self.terminated = True
+
+
+class ClassResolver:
+ instances = {}
+
+ # convert class name in camel case to module file name in underscores
+ @staticmethod
+ def get_module_file_by_class_name(class_name):
+ s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', class_name)
+ module_file = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
+ return module_file
+
+ # convert module file name in underscores to class name in camel case
+ @staticmethod
+ def get_class_name_by_module(module_name):
+ name_parts = [word.capitalize() for word in module_name.split('_')]
+ class_name = ''.join(name_parts)
+ return class_name
+
+ @staticmethod
+ def get_fully_qualified_class(class_name: str = None,
+ package_name: str = "discover",
+ module_name: str = None):
+ module_file = module_name if module_name \
+ else ClassResolver.get_module_file_by_class_name(class_name)
+ module_parts = [package_name, module_file]
+ module_name = ".".join(module_parts)
+ try:
+ class_module = importlib.import_module(module_name)
+ except ImportError:
+ raise ValueError('could not import module {}'.format(module_name))
+
+ clazz = getattr(class_module, class_name)
+ return clazz
+
+ @staticmethod
+ def prepare_class(class_name: str = None,
+ package_name: str = "discover",
+ module_name: str = None):
+ if not class_name and not module_name:
+ raise ValueError('class_name or module_name must be provided')
+ if not class_name:
+ class_name = ClassResolver.get_class_name_by_module(module_name)
+ if class_name in ClassResolver.instances:
+ return 'instance', ClassResolver.instances[class_name]
+ clazz = ClassResolver.get_fully_qualified_class(class_name, package_name,
+ module_name)
+ return 'class', clazz
+
+ @staticmethod
+ def get_instance_of_class(class_name: str = None,
+ package_name: str = "discover",
+ module_name: str = None):
+ val_type, clazz = \
+ ClassResolver.prepare_class(class_name=class_name,
+ package_name=package_name,
+ module_name=module_name)
+ if val_type == 'instance':
+ return clazz
+ instance = clazz()
+ ClassResolver.instances[class_name] = instance
+ return instance
+
+ @staticmethod
+ def get_instance_single_arg(arg: object,
+ class_name: str = None,
+ package_name: str = "discover",
+ module_name: str = None):
+ val_type, clazz = \
+ ClassResolver.prepare_class(class_name=class_name,
+ package_name=package_name,
+ module_name=module_name)
+ if val_type == 'instance':
+ return clazz
+ instance = clazz(arg)
+ ClassResolver.instances[class_name] = instance
+ return instance
+
+
+# When searching MongoDB, string ids first need to be
+# converted to ObjectId instances
+def generate_object_ids(keys, obj):
+ for key in keys:
+ if key in obj:
+ o = obj.pop(key)
+ if o:
+ try:
+ o = ObjectId(o)
+ except Exception:
+ raise Exception("{0} is not a valid object id".
+ format(o))
+ obj[key] = o
+
+
+# Get arguments from CLI or another source
+# and convert them to dict to enforce uniformity.
+# Throws a TypeError if arguments can't be converted to dict.
+def setup_args(args: dict,
+ defaults: Dict[str, object],
+ get_cmd_args: Callable[[], Namespace] = None):
+ if defaults is None:
+ defaults = {}
+
+ if args is None and get_cmd_args is not None:
+ args = vars(get_cmd_args())
+ elif not isinstance(args, dict):
+ try:
+ args = dict(args)
+ except TypeError:
+ try:
+ args = vars(args)
+ except TypeError:
+ raise TypeError("Wrong arguments format")
+
+ return dict(defaults, **args)
+
+
+def encode_router_id(host_id: str, uuid: str):
+ return '-'.join([host_id, 'qrouter', uuid])
+
+
+def decode_router_id(router_id: str):
+ return router_id.split('qrouter-')[-1]
+
+
+def get_extension(file_path: str) -> str:
+ return os.path.splitext(file_path)[1][1:]
+
+
+def encode_aci_dn(object_id):
+ return object_id.replace("topology/", "").replace("/", "___").replace("-", "__")
+
+
+def decode_aci_dn(object_id):
+ return object_id.replace("___", "/").replace("__", "-")
+
+
+def get_object_path_part(path: str, part_name: str):
+ match = re.match(".*/{}/(.+?)/.*".format(part_name), path)
+ return match.group(1) if match else None
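Sketch of the naming helpers in this module; the class name, host and uuid values are illustrative:

    from utils.util import ClassResolver, encode_router_id, decode_router_id

    ClassResolver.get_module_file_by_class_name('EventPortAdd')   # -> 'event_port_add'
    ClassResolver.get_class_name_by_module('event_port_add')      # -> 'EventPortAdd'

    rid = encode_router_id('node-2.local',
                           'b1946ac9-2f3e-4d2a-9f5b-0c6d7e8f9a0b')
    # -> 'node-2.local-qrouter-b1946ac9-2f3e-4d2a-9f5b-0c6d7e8f9a0b'
    decode_router_id(rid)   # -> 'b1946ac9-2f3e-4d2a-9f5b-0c6d7e8f9a0b'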