author     Stuart Mackie <wsmackie@juniper.net>  2017-03-23 06:26:16 -0700
committer  Stuart Mackie <wsmackie@juniper.net>  2017-03-23 06:26:16 -0700
commit     c29a525331b45ead15ec376d03f76836d622c943 (patch)
tree       6713f16cb30c739a66a0cb56fb4e2efed732eff1
parent     88df88a19674ccc0017836941b8ee32eaadf19fb (diff)
Removed test case files without correct license language. Will replace in future.
Change-Id: I16435a250257cf97a67a8ba31303c89d74204ac2
Signed-off-by: Stuart Mackie <wsmackie@juniper.net>
-rwxr-xr-x  Testcases/RunTests.sh  125
-rw-r--r--  Testcases/Test_V4  2
-rw-r--r--  Testcases/cfgm_common/__init__.py  50
-rw-r--r--  Testcases/cfgm_common/__init__.pyc  bin 2555 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/analytics_client.py  63
-rw-r--r--  Testcases/cfgm_common/analytics_client.pyc  bin 1928 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/buildinfo.py  1
-rw-r--r--  Testcases/cfgm_common/buildinfo.pyc  bin 356 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/dependency_tracker.py  50
-rw-r--r--  Testcases/cfgm_common/dependency_tracker.pyc  bin 1688 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/exceptions.py  133
-rw-r--r--  Testcases/cfgm_common/exceptions.pyc  bin 6506 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/ifmap/__init__.py  29
-rw-r--r--  Testcases/cfgm_common/ifmap/__init__.pyc  bin 846 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/ifmap/client.py  263
-rw-r--r--  Testcases/cfgm_common/ifmap/client.pyc  bin 8050 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/ifmap/id.py  191
-rw-r--r--  Testcases/cfgm_common/ifmap/id.pyc  bin 8855 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/ifmap/metadata.py  36
-rw-r--r--  Testcases/cfgm_common/ifmap/metadata.pyc  bin 1674 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/ifmap/operations.py  75
-rw-r--r--  Testcases/cfgm_common/ifmap/operations.pyc  bin 5131 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/ifmap/request.py  106
-rw-r--r--  Testcases/cfgm_common/ifmap/request.pyc  bin 7617 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/ifmap/response.py  55
-rw-r--r--  Testcases/cfgm_common/ifmap/response.pyc  bin 2465 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/ifmap/util.py  34
-rw-r--r--  Testcases/cfgm_common/ifmap/util.pyc  bin 933 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/imid.py  344
-rw-r--r--  Testcases/cfgm_common/imid.pyc  bin 10688 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/importutils.py  66
-rw-r--r--  Testcases/cfgm_common/importutils.pyc  bin 2176 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/rest.py  41
-rw-r--r--  Testcases/cfgm_common/rest.pyc  bin 1355 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/svc_info.py  99
-rw-r--r--  Testcases/cfgm_common/svc_info.pyc  bin 4584 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/utils.py  110
-rw-r--r--  Testcases/cfgm_common/utils.pyc  bin 3544 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/__init__.py  0
-rw-r--r--  Testcases/cfgm_common/uve/__init__.pyc  bin 147 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/acl/__init__.py  1
-rw-r--r--  Testcases/cfgm_common/uve/acl/__init__.pyc  bin 200 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/acl/acl.html  14
-rw-r--r--  Testcases/cfgm_common/uve/acl/acl.xml  3
-rw-r--r--  Testcases/cfgm_common/uve/acl/constants.py  12
-rw-r--r--  Testcases/cfgm_common/uve/acl/constants.pyc  bin 302 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/acl/http_request.py  14
-rw-r--r--  Testcases/cfgm_common/uve/acl/http_request.pyc  bin 214 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/acl/index.html  9
-rw-r--r--  Testcases/cfgm_common/uve/acl/request_skeleton.py  13
-rw-r--r--  Testcases/cfgm_common/uve/acl/request_skeleton.pyc  bin 159 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/acl/style.css  10
-rw-r--r--  Testcases/cfgm_common/uve/acl/ttypes.py  837
-rw-r--r--  Testcases/cfgm_common/uve/acl/ttypes.pyc  bin 27990 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/__init__.py  0
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/__init__.pyc  bin 160 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/cfgm_cpuinfo.html  14
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/cfgm_cpuinfo.xml  3
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/constants.py  12
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/constants.pyc  bin 311 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/__init__.py  1
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/__init__.pyc  bin 217 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/constants.py  12
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/constants.pyc  bin 319 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/cpuinfo.html  18
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/cpuinfo.xml  5
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/http_request.py  15
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/http_request.pyc  bin 340 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/index.html  10
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/request_skeleton.py  25
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/request_skeleton.pyc  bin 641 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/style.css  10
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/ttypes.py  960
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/ttypes.pyc  bin 32521 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/http_request.py  14
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/http_request.pyc  bin 223 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/index.html  14
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/__init__.py  1
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/__init__.pyc  bin 222 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/constants.py  33
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/constants.pyc  bin 787 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/http_request.py  14
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/http_request.pyc  bin 236 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/index.html  9
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/process_info.html  13
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/process_info.xml  3
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/request_skeleton.py  13
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/request_skeleton.pyc  bin 181 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/style.css  10
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/ttypes.py  854
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/ttypes.pyc  bin 26064 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/request_skeleton.py  13
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/request_skeleton.pyc  bin 168 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/style.css  10
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/ttypes.py  1281
-rw-r--r--  Testcases/cfgm_common/uve/cfgm_cpuinfo/ttypes.pyc  bin 40928 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/physical_router/__init__.py  1
-rw-r--r--  Testcases/cfgm_common/uve/physical_router/__init__.pyc  bin 212 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/physical_router/constants.py  12
-rw-r--r--  Testcases/cfgm_common/uve/physical_router/constants.pyc  bin 314 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/physical_router/http_request.py  14
-rw-r--r--  Testcases/cfgm_common/uve/physical_router/http_request.pyc  bin 226 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/physical_router/index.html  9
-rw-r--r--  Testcases/cfgm_common/uve/physical_router/physical_router.html  14
-rw-r--r--  Testcases/cfgm_common/uve/physical_router/physical_router.xml  3
-rw-r--r--  Testcases/cfgm_common/uve/physical_router/request_skeleton.py  13
-rw-r--r--  Testcases/cfgm_common/uve/physical_router/request_skeleton.pyc  bin 171 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/physical_router/style.css  10
-rw-r--r--  Testcases/cfgm_common/uve/physical_router/ttypes.py  461
-rw-r--r--  Testcases/cfgm_common/uve/physical_router/ttypes.pyc  bin 14361 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/service_instance/__init__.py  1
-rw-r--r--  Testcases/cfgm_common/uve/service_instance/__init__.pyc  bin 213 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/service_instance/constants.py  12
-rw-r--r--  Testcases/cfgm_common/uve/service_instance/constants.pyc  bin 315 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/service_instance/http_request.py  14
-rw-r--r--  Testcases/cfgm_common/uve/service_instance/http_request.pyc  bin 227 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/service_instance/index.html  9
-rw-r--r--  Testcases/cfgm_common/uve/service_instance/request_skeleton.py  13
-rw-r--r--  Testcases/cfgm_common/uve/service_instance/request_skeleton.pyc  bin 172 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/service_instance/service_instance.html  14
-rw-r--r--  Testcases/cfgm_common/uve/service_instance/service_instance.xml  3
-rw-r--r--  Testcases/cfgm_common/uve/service_instance/style.css  10
-rw-r--r--  Testcases/cfgm_common/uve/service_instance/ttypes.py  484
-rw-r--r--  Testcases/cfgm_common/uve/service_instance/ttypes.pyc  bin 16150 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/virtual_machine/__init__.py  1
-rw-r--r--  Testcases/cfgm_common/uve/virtual_machine/__init__.pyc  bin 212 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/virtual_machine/constants.py  12
-rw-r--r--  Testcases/cfgm_common/uve/virtual_machine/constants.pyc  bin 314 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/virtual_machine/http_request.py  14
-rw-r--r--  Testcases/cfgm_common/uve/virtual_machine/http_request.pyc  bin 226 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/virtual_machine/index.html  9
-rw-r--r--  Testcases/cfgm_common/uve/virtual_machine/request_skeleton.py  13
-rw-r--r--  Testcases/cfgm_common/uve/virtual_machine/request_skeleton.pyc  bin 171 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/virtual_machine/style.css  10
-rw-r--r--  Testcases/cfgm_common/uve/virtual_machine/ttypes.py  517
-rw-r--r--  Testcases/cfgm_common/uve/virtual_machine/ttypes.pyc  bin 17222 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/virtual_machine/virtual_machine.html  14
-rw-r--r--  Testcases/cfgm_common/uve/virtual_machine/virtual_machine.xml  3
-rw-r--r--  Testcases/cfgm_common/uve/virtual_network/__init__.py  1
-rw-r--r--  Testcases/cfgm_common/uve/virtual_network/__init__.pyc  bin 212 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/virtual_network/constants.py  12
-rw-r--r--  Testcases/cfgm_common/uve/virtual_network/constants.pyc  bin 314 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/virtual_network/http_request.py  14
-rw-r--r--  Testcases/cfgm_common/uve/virtual_network/http_request.pyc  bin 226 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/virtual_network/index.html  9
-rw-r--r--  Testcases/cfgm_common/uve/virtual_network/request_skeleton.py  13
-rw-r--r--  Testcases/cfgm_common/uve/virtual_network/request_skeleton.pyc  bin 171 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/virtual_network/style.css  10
-rw-r--r--  Testcases/cfgm_common/uve/virtual_network/ttypes.py  787
-rw-r--r--  Testcases/cfgm_common/uve/virtual_network/ttypes.pyc  bin 25128 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/virtual_network/virtual_network.html  14
-rw-r--r--  Testcases/cfgm_common/uve/virtual_network/virtual_network.xml  3
-rw-r--r--  Testcases/cfgm_common/uve/vnc_api/__init__.py  1
-rw-r--r--  Testcases/cfgm_common/uve/vnc_api/__init__.pyc  bin 204 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/vnc_api/constants.py  12
-rw-r--r--  Testcases/cfgm_common/uve/vnc_api/constants.pyc  bin 306 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/vnc_api/http_request.py  14
-rw-r--r--  Testcases/cfgm_common/uve/vnc_api/http_request.pyc  bin 218 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/vnc_api/index.html  9
-rw-r--r--  Testcases/cfgm_common/uve/vnc_api/request_skeleton.py  13
-rw-r--r--  Testcases/cfgm_common/uve/vnc_api/request_skeleton.pyc  bin 163 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/vnc_api/style.css  10
-rw-r--r--  Testcases/cfgm_common/uve/vnc_api/ttypes.py  778
-rw-r--r--  Testcases/cfgm_common/uve/vnc_api/ttypes.pyc  bin 23969 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/uve/vnc_api/vnc_api.html  14
-rw-r--r--  Testcases/cfgm_common/uve/vnc_api/vnc_api.xml  3
-rw-r--r--  Testcases/cfgm_common/vnc_cassandra.py  317
-rw-r--r--  Testcases/cfgm_common/vnc_cassandra.pyc  bin 10325 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/vnc_cpu_info.py  196
-rw-r--r--  Testcases/cfgm_common/vnc_cpu_info.pyc  bin 5531 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/vnc_db.py  205
-rw-r--r--  Testcases/cfgm_common/vnc_db.pyc  bin 7152 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/vnc_extensions.py  65
-rw-r--r--  Testcases/cfgm_common/vnc_extensions.pyc  bin 2704 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/vnc_kombu.py  226
-rw-r--r--  Testcases/cfgm_common/vnc_kombu.pyc  bin 8211 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/vnc_plugin_base.py  71
-rw-r--r--  Testcases/cfgm_common/vnc_plugin_base.pyc  bin 3067 -> 0 bytes
-rw-r--r--  Testcases/cfgm_common/zkclient.py  358
-rw-r--r--  Testcases/cfgm_common/zkclient.pyc  bin 11818 -> 0 bytes
-rwxr-xr-x  Testcases/cleanup  10
-rwxr-xr-x  Testcases/config  19
-rwxr-xr-x  Testcases/config.16  19
-rwxr-xr-x  Testcases/config.int  19
-rw-r--r--  Testcases/config_obj.py  1737
-rw-r--r--  Testcases/config_obj.pyc  bin 65439 -> 0 bytes
-rw-r--r--  Testcases/config_shell.py  379
-rw-r--r--  Testcases/config_shell.pyc  bin 12344 -> 0 bytes
-rw-r--r--  Testcases/configuration.md  666
-rw-r--r--  Testcases/openstackrc  6
-rw-r--r--  Testcases/openstackrc.int  6
-rw-r--r--  Testcases/vnc_api/__init__.py  0
-rw-r--r--  Testcases/vnc_api/__init__.pyc  bin 139 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/common/__init__.py  50
-rw-r--r--  Testcases/vnc_api/common/__init__.pyc  bin 2573 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/common/exceptions.py  133
-rw-r--r--  Testcases/vnc_api/common/exceptions.pyc  bin 6605 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/common/rest.py  41
-rw-r--r--  Testcases/vnc_api/common/rest.pyc  bin 1373 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/gen/__init__.py  0
-rw-r--r--  Testcases/vnc_api/gen/__init__.pyc  bin 143 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/gen/cfixture.py  46
-rw-r--r--  Testcases/vnc_api/gen/cfixture.pyc  bin 2639 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/gen/connection_drv_gen.py  2529
-rw-r--r--  Testcases/vnc_api/gen/connection_drv_gen.pyc  bin 125734 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/gen/generatedssuper.py  226
-rw-r--r--  Testcases/vnc_api/gen/generatedssuper.pyc  bin 10071 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/gen/resource_client.py  10026
-rw-r--r--  Testcases/vnc_api/gen/resource_client.pyc  bin 325404 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/gen/resource_common.py  15559
-rw-r--r--  Testcases/vnc_api/gen/resource_common.pyc  bin 523743 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/gen/resource_server.py  2161
-rw-r--r--  Testcases/vnc_api/gen/resource_server.pyc  bin 83009 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/gen/resource_test.py  5173
-rw-r--r--  Testcases/vnc_api/gen/resource_test.pyc  bin 195282 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/gen/resource_xsd.py  18494
-rw-r--r--  Testcases/vnc_api/gen/resource_xsd.pyc  bin 1012898 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/gen/vnc_api_client_gen.py  5354
-rw-r--r--  Testcases/vnc_api/gen/vnc_api_client_gen.pyc  bin 204398 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/gen/vnc_api_extension_gen.py  2469
-rw-r--r--  Testcases/vnc_api/gen/vnc_api_extension_gen.pyc  bin 93716 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/gen/vnc_api_schema.py  192
-rw-r--r--  Testcases/vnc_api/gen/vnc_api_schema.pyc  bin 7184 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/gen/vnc_api_server_gen.py  26142
-rw-r--r--  Testcases/vnc_api/gen/vnc_api_server_gen.pyc  bin 565409 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/gen/vnc_api_test_gen.py  182
-rw-r--r--  Testcases/vnc_api/gen/vnc_api_test_gen.pyc  bin 11187 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/gen/vnc_cassandra_client_gen.py  20248
-rw-r--r--  Testcases/vnc_api/gen/vnc_cassandra_client_gen.pyc  bin 457503 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/gen/vnc_ifmap_client_gen.py  10533
-rw-r--r--  Testcases/vnc_api/gen/vnc_ifmap_client_gen.pyc  bin 260656 -> 0 bytes
-rw-r--r--  Testcases/vnc_api/vnc_api.py  682
-rw-r--r--  Testcases/vnc_api/vnc_api.pyc  bin 21621 -> 0 bytes
233 files changed, 0 insertions, 134099 deletions
diff --git a/Testcases/RunTests.sh b/Testcases/RunTests.sh
deleted file mode 100755
index 4c57ae8..0000000
--- a/Testcases/RunTests.sh
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/bin/bash
-# FUNCTEST_REPO_DIR=/tmp
-# Setup environment
-export IMAGE_DIR="$FUNCTEST_REPO_DIR/ovno/images"
-export TESTCASE_DIR="$FUNCTEST_REPO_DIR/ovno/Testcases"
-export CIRROS_IMAGE=Cirros.img
-export FW_IMAGE="$FUNCTEST_REPO_DIR/ovno/images/OpenWRT-LRM--fw-v3.img"
-export RESULTS_DIR="$FUNCTEST_REPO_DIR/ovno/Testresults/results.`date +%Y-%m-%d:%H:%M:%S`"
-export RESULTS_FILE="$RESULTS_DIR/ocl_results"
-export ERR_FILE="$RESULTS_DIR/ocl__err"
-export TEST_ERR=/tmp/test_err
-mkdir -p $RESULTS_DIR
-
-
-check_test(){
-echo $test_desc
-if [ $($test > $TEST_ERR) ]
-then
- echo "Success $test_name" >> $RESULTS_FILE
-else
- echo "FAIL $test_name" >> $ERR_FILE
- cat $TEST_ERR >> $ERR_FILE
-fi
-}
-#------------------------------------------------------------------------------#
-# Go to the where the scripts are
-
-cd $TESTCASE_DIR
-
-export API_IP=`echo $OS_AUTH_URL | cut -d'/' -f3`
-
-export PATH=.:$PATH
-
-mkdir $LOG_DIR
-
-#___________________________________________________________________
-#Load images into OpenStack
-
-
-# Get the Cirros image onto jump/test server, then load into OpenStack
-glance image-create --name Cirros --file $IMAGE_DIR/$CIRROS_IMAGE --disk-format qcow2 --container-format bare --owner $TENANT_ID
-
-# Get firewall image
-glance image-create --name fw --file $IMAGE_DIR/$FW_IMAGE --disk-format qcow2 --container-format bare --owner $TENANT_ID
-
-
-export NET1="192.168.1.0/24"
-export NET2="192.168.2.0/24"
-export NET3="192.168.3.0/24"
-export NET_PUBLIC="10.8.10.0/24"
-
-
-# Set up the test list
-cat<<EOF | sed -e "/#/d" > Test_V4
-Add IP address manager|add_ipam|config add ipam ipam
-Add network Net1|add_network_Net1|config add network N1 --ipam ipam --subnet $NET1
-Add VM V11|add_VM_V11|config add vm V11 --image Cirros --flavor m1.tiny --network Net1
-Add VM V12|add_VM_V12|config add vm V12 --image Cirros --flavor m1.tiny --network Net1
-#Check V11 to V12 connectivity!! Still to be implemented !!
-Add policy Net2-Net3|add_plcy_Net2-Net3|config add policy Net2-Net3 --rule src-net=Net1,dst-net=Net2config add network Net2 --ipam ipam --subnet 192.168.2.0/24 --policy Net2-Net3
-Add network Net2|add_net_Net2|config add network Net2 --ipam ipam --subnet 192.168.2.0/24 --policy Net2-Net3
-Add network Net3|add_net_Net3|config add network Net3 --ipam ipam --subnet 192.168.2.0/24 --policy Net2-Net3
-Add VM V21|add_VM_VM21|config add vm V21 --image Cirros --flavor m1.tiny --network Net2
-Add VM V31|add_VM_VM31|config add vm V31 --image Cirros --flavor m1.tiny --network Net3
-#Check V21 to V31 connectivity !! Still to be implemented !!
-Add svc tmplt fw-l3|add_ST_fw-l3|config add service-template fw-l3 --mode in-network --type firewall --image fw --flavor m1.medium --interface left --interface right --interface management
-Add service inst fw|add_svc_fw|config add service-instance fw --template fw-l3 --network network=left,tenant=$TENANT --network network=right,tenant=$TENANT --network=auto,tenant=$TENANT
-Add svc chain policy|add_svc_chain_plcy|config add policy left-right --rule src-net=left,dst-net=right --action service --service fw
-Add network left|add_network_left|config add network left --ipam ipam --subnet $NET2 --policy left-right
-Add network right|add_network_right|config add network right --ipam ipam --subnet $NET3 --policy left-right
-Add VM VL1|add_VM_VL1|config add vm VL1 --image Cirros --flavor m1.tiny --network left
-Add VM VR1|add_VM_VR1|config add vm VR1 --image Cirros --flavor m1.tiny --network right
-#Check V21 to V31 connectivity !! Still to be implemented !!
-Add network public|add_net_public|config add network public --ipam ipam-default --sbunet $NET_PUBLIC --route-target 64512:10000
-Add floating IP pool|add_float_ip_pool|config add floating-ip-pool public-pool --network public
-Add floating IP to VM|add_float_ip_vm|config add vm-interface server|V11 --floating-ip --floating-ip-pool public-pool
-# Check external connectivity to V11 !! Still to be implemented !!
-Clean up v4|clean_up_v4|cleanup v4
-EOF
-
-while IFS='|' read test_desc test_name test
-do
- check_test
-done <Test_V4
-
-# IPv6 tests
-export NET1="xx"
-export NET2="yy"
-export NET3="zz"
-export NET_PUBLIC="aa"
-
-cat<<EOF | sed -e "/#/d" > Test_V6
-V6 IP V6ress manager|V6_ipam|config add ipam ipam
-V6 network Net1|V6_network_Net1|config add network N1 --ipam ipam --subnet $NET1
-V6 VM V11|V6_VM_V11|config add vm V11 --image Cirros --flavor m1.tiny --network Net1
-V6 VM V12|V6_VM_V12|config add vm V12 --image Cirros --flavor m1.tiny --network Net1
-# Check V11 to V12 connectivity!! Still to be implemented !!
-V6 policy Net2-Net3|V6_plcy_Net2-Net3| config add policy Net2-Net3 --rule src-net=Net1,dst-net=Net2config V6 network Net2 --ipam ipam --subnet 192.168.2.0/24 --policy Net2-Net3
-V6 network Net2|V6_net_Net2|config add network Net2 --ipam ipam --subnet 192.168.2.0/24 --policy Net2-Net3
-V6 network Net3|V6_net_Net3|config add network Net3 --ipam ipam --subnet 192.168.2.0/24 --policy Net2-Net3
-V6 VM V21|V6_VM_VM21|config add vm V21 --image Cirros --flavor m1.tiny --network Net2
-V6 VM V31|V6_VM_VM31|config add vm V31 --image Cirros --flavor m1.tiny --network Net3
-# Check V21 to V31 connectivity !! Still to be implemented !!
-V6 svc tmplt fw-l3|V6_ST_fw-l3|config add service-template fw-l3 --mode in-network --type firewall --image fw --flavor m1.medium --interface left --interface right --interface management
-V6 service inst fw|V6_svc_fw|config add service-instance fw --template fw-l3 --network network=left,tenant=$TENANT --network network=right,tenant=$TENANT --network=auto,tenant=$TENANT
-V6 svc chain policy|V6_svc_chain_plcy|config add policy left-right --rule src-net=left,dst-net=right --action service --service fw
-V6 network left|V6_network_left|config add network left --ipam ipam --subnet $NET2 --policy left-right
-V6 network right|V6_network_right|config add network right --ipam ipam --subnet $NET3 --policy left-right
-V6 VM VL1|V6_VM_VL1|config add vm VL1 --image Cirros --flavor m1.tiny --network left
-V6 VM VR1|V6_VM_VR1|config add vm VR1 --image Cirros --flavor m1.tiny --network right
-# Check V21 to V31 connectivity !! Still to be implemented !!
-V6 network public|V6_net_public|config add network public --ipam ipam-default --sbunet $NET_PUBLIC --route-target 64512:10000
-V6 floating IP pool|V6_float_ip_pool|config add floating-ip-pool public-pool --network public
-V6 floating IP to VM|V6_float_ip_vm|config add vm-interface server|V11 --floating-ip --floating-ip-pool public-pool
-# Check external connectivity to V11 !! Still to be implemented !!
-Clean up v6|clean_up_v6|cleanup v6
-EOF
-
-while IFS='|' read test_desc test_name test
-do
- check_test
-done <Test_V6
-
-
-# After this send results to database
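
RunTests.sh above drives each pipe-delimited entry (description|name|command) of the generated test list through check_test. Note that the test "if [ $($test > $TEST_ERR) ]" checks the command's captured stdout, which is redirected into $TEST_ERR and therefore empty, rather than its exit status, so the success branch appears unreachable. A rough Python restatement of the intended loop, keyed off the exit code and using hypothetical file names (this sketch is not part of the original suite):

    import subprocess

    def run_suite(test_file, results_file, err_file):
        with open(test_file) as tests, \
             open(results_file, 'a') as results, \
             open(err_file, 'a') as errors:
            for line in tests:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                # description | short name | command (the command may itself contain '|')
                test_desc, test_name, command = line.split('|', 2)
                print(test_desc)
                proc = subprocess.Popen(command, shell=True,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT,
                                        universal_newlines=True)
                output, _ = proc.communicate()
                if proc.returncode == 0:
                    results.write("Success %s\n" % test_name)
                else:
                    errors.write("FAIL %s\n%s\n" % (test_name, output))

    run_suite('Test_V4', 'ocl_results', 'ocl__err')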
diff --git a/Testcases/Test_V4 b/Testcases/Test_V4
deleted file mode 100644
index 11890f6..0000000
--- a/Testcases/Test_V4
+++ /dev/null
@@ -1,2 +0,0 @@
-one|this one|echo "ok"
-two|that one|cho "not ok"
diff --git a/Testcases/cfgm_common/__init__.py b/Testcases/cfgm_common/__init__.py
deleted file mode 100644
index feaf215..0000000
--- a/Testcases/cfgm_common/__init__.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-import sys
-import re
-
-IP_FABRIC_VN_FQ_NAME = ['default-domain', 'default-project', 'ip-fabric']
-IP_FABRIC_RI_FQ_NAME = IP_FABRIC_VN_FQ_NAME + ['__default__']
-LINK_LOCAL_VN_FQ_NAME = ['default-domain', 'default-project', '__link_local__']
-LINK_LOCAL_RI_FQ_NAME = LINK_LOCAL_VN_FQ_NAME + ['__link_local__']
-SG_NO_RULE_NAME = "__no_rule__"
-SG_NO_RULE_FQ_NAME = ['default-domain', 'default-project', SG_NO_RULE_NAME]
-
-BGP_RTGT_MIN_ID = 8000000
-SGID_MIN_ALLOC = 8000000
-
-def obj_to_json(obj):
- return dict((k, v) for k, v in obj.__dict__.iteritems())
-#end obj_to_json
-
-def json_to_obj(obj):
- pass
-#end json_to_obj
-
-def ignore_exceptions(func):
- def wrapper(*args, **kwargs):
- try:
- return func(*args, **kwargs)
- except Exception as e:
- return None
- return wrapper
-# end ignore_exceptions
-
-_illegal_unichrs = [(0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x1F),
- (0x7F, 0x84), (0x86, 0x9F),
- (0xFDD0, 0xFDDF), (0xFFFE, 0xFFFF)]
-if sys.maxunicode >= 0x10000: # not narrow build
- _illegal_unichrs.extend([(0x1FFFE, 0x1FFFF), (0x2FFFE, 0x2FFFF),
- (0x3FFFE, 0x3FFFF), (0x4FFFE, 0x4FFFF),
- (0x5FFFE, 0x5FFFF), (0x6FFFE, 0x6FFFF),
- (0x7FFFE, 0x7FFFF), (0x8FFFE, 0x8FFFF),
- (0x9FFFE, 0x9FFFF), (0xAFFFE, 0xAFFFF),
- (0xBFFFE, 0xBFFFF), (0xCFFFE, 0xCFFFF),
- (0xDFFFE, 0xDFFFF), (0xEFFFE, 0xEFFFF),
- (0xFFFFE, 0xFFFFF), (0x10FFFE, 0x10FFFF)])
-
-_illegal_ranges = ["%s-%s" % (unichr(low), unichr(high))
- for (low, high) in _illegal_unichrs]
-illegal_xml_chars_RE = re.compile(u'[%s]' % u''.join(_illegal_ranges))
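
Besides the well-known FQ name constants, the module above exposes a small ignore_exceptions decorator and a precompiled pattern matching characters that are illegal in XML 1.0. A minimal usage sketch with hypothetical values (Python 2, since the module itself relies on unichr):

    from cfgm_common import ignore_exceptions, illegal_xml_chars_RE

    @ignore_exceptions
    def parse_int(value):
        # Any exception raised here is swallowed and None is returned instead.
        return int(value)

    print(parse_int('42'))        # 42
    print(parse_int('not-int'))   # None

    # Strip characters that may not appear in XML before publishing to IF-MAP.
    raw = u'config\x0bname'
    print(illegal_xml_chars_RE.sub(u'', raw))   # configname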
diff --git a/Testcases/cfgm_common/__init__.pyc b/Testcases/cfgm_common/__init__.pyc
deleted file mode 100644
index d5479c4..0000000
--- a/Testcases/cfgm_common/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/analytics_client.py b/Testcases/cfgm_common/analytics_client.py
deleted file mode 100644
index c5d14a0..0000000
--- a/Testcases/cfgm_common/analytics_client.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2014 Cloudwatt
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# @author: Sylvain Afchain, eNovance.
-
-import requests
-import six
-from six.moves.urllib import parse as urlparse
-
-
-class OpenContrailAPIFailed(Exception):
- pass
-
-
-class Client(object):
- """Opencontrail Base Statistics REST API Client."""
- #TODO: use a pool of servers
-
- def __init__(self, endpoint, data={}):
- self.endpoint = endpoint
- self.data = data
-
- def request(self, path, fqdn_uuid, data=None):
- req_data = dict(self.data)
- if data:
- req_data.update(data)
-
- req_params = self._get_req_params(data=req_data)
-
- url = urlparse.urljoin(self.endpoint, path + fqdn_uuid)
- resp = requests.get(url, **req_params)
-
- if resp.status_code != 200:
- raise OpenContrailAPIFailed(
- ('Opencontrail API returned %(status)s %(reason)s') %
- {'status': resp.status_code, 'reason': resp.reason})
-
- return resp.json()
-
- def _get_req_params(self, data=None):
- req_params = {
- 'headers': {
- 'Accept': 'application/json'
- },
- 'data': data,
- 'allow_redirects': False,
- }
-
- return req_params
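
Client.request() above joins the endpoint, path, and object identifier into a single GET and returns the decoded JSON, raising OpenContrailAPIFailed on any non-200 reply. A hypothetical call against a local analytics API; the endpoint, path, and UVE name are illustrative:

    from cfgm_common.analytics_client import Client, OpenContrailAPIFailed

    client = Client('http://127.0.0.1:8081')
    try:
        uve = client.request('analytics/uves/virtual-network/',
                             'default-domain:default-project:net1')
        print(uve)
    except OpenContrailAPIFailed as e:
        # Raised for any non-200 status from the analytics API.
        print(e)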
diff --git a/Testcases/cfgm_common/analytics_client.pyc b/Testcases/cfgm_common/analytics_client.pyc
deleted file mode 100644
index 0e70438..0000000
--- a/Testcases/cfgm_common/analytics_client.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/buildinfo.py b/Testcases/cfgm_common/buildinfo.py
deleted file mode 100644
index 478b1c1..0000000
--- a/Testcases/cfgm_common/buildinfo.py
+++ /dev/null
@@ -1 +0,0 @@
-build_info = "{\"build-info\" : [{\"build-version\" : \"2.20\", \"build-time\" : \"2015-06-25 07:52:54.221985\", \"build-user\" : \"mganley\", \"build-hostname\" : \"contrail-ec-build16\", \"build-git-ver\" : \"c6744e5\", ";
diff --git a/Testcases/cfgm_common/buildinfo.pyc b/Testcases/cfgm_common/buildinfo.pyc
deleted file mode 100644
index 50c9be7..0000000
--- a/Testcases/cfgm_common/buildinfo.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/dependency_tracker.py b/Testcases/cfgm_common/dependency_tracker.py
deleted file mode 100644
index dafc4f9..0000000
--- a/Testcases/cfgm_common/dependency_tracker.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
-#
-
-"""
-This file contains implementation of dependency tracker
-for physical router configuration manager
-"""
-
-
-class DependencyTracker(object):
-
- def __init__(self, object_class_map, reaction_map):
- self._reaction_map = reaction_map
- self._object_class_map = object_class_map
- self.resources = {}
- # end __init__
-
- def _add_resource(self, obj_type, obj_uuid):
- if obj_type in self.resources:
- if obj_uuid in self.resources[obj_type]:
- # already visited
- return False
- self.resources[obj_type].append(obj_uuid)
- else:
- self.resources[obj_type] = [obj_uuid]
- return True
- # end _add_resource
-
- def evaluate(self, obj_type, obj, from_type='self'):
- if obj_type not in self._reaction_map:
- return
- if not self._add_resource(obj_type, obj.uuid):
- return
-
- for ref_type in self._reaction_map[obj_type][from_type]:
- ref = getattr(obj, ref_type, None)
- if ref is None:
- refs = getattr(obj, ref_type+'s', [])
- else:
- refs = [ref]
-
- ref_class = self._object_class_map[ref_type]
- for ref in refs:
- ref_obj = ref_class.get(ref)
- if ref_obj is None:
- return
- self.evaluate(ref_type, ref_obj, obj_type)
- # end evaluate
-# end DependencyTracker
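
DependencyTracker recursively follows the reference attributes named in reaction_map and collects, per object type, the UUIDs that need re-evaluation; _add_resource keeps it from visiting the same object twice. A self-contained sketch with two hypothetical in-memory classes standing in for the real config objects:

    from cfgm_common.dependency_tracker import DependencyTracker

    class VirtualNetwork(object):
        _objects = {}
        def __init__(self, uuid):
            self.uuid = uuid
            self.virtual_machine_interfaces = []
            VirtualNetwork._objects[uuid] = self
        @classmethod
        def get(cls, uuid):
            return cls._objects.get(uuid)

    class VirtualMachineInterface(object):
        _objects = {}
        def __init__(self, uuid):
            self.uuid = uuid
            self.virtual_network = None
            VirtualMachineInterface._objects[uuid] = self
        @classmethod
        def get(cls, uuid):
            return cls._objects.get(uuid)

    vn = VirtualNetwork('vn-1')
    vmi = VirtualMachineInterface('vmi-1')
    vn.virtual_machine_interfaces = ['vmi-1']
    vmi.virtual_network = 'vn-1'

    object_class_map = {
        'virtual_network': VirtualNetwork,
        'virtual_machine_interface': VirtualMachineInterface,
    }
    # For each object type: which referenced types to visit next, keyed by the
    # type the evaluation arrived from ('self' for the starting object).
    reaction_map = {
        'virtual_network': {'self': ['virtual_machine_interface'],
                            'virtual_machine_interface': []},
        'virtual_machine_interface': {'self': [],
                                      'virtual_network': []},
    }

    tracker = DependencyTracker(object_class_map, reaction_map)
    tracker.evaluate('virtual_network', vn)
    print(tracker.resources)
    # {'virtual_network': ['vn-1'], 'virtual_machine_interface': ['vmi-1']}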
diff --git a/Testcases/cfgm_common/dependency_tracker.pyc b/Testcases/cfgm_common/dependency_tracker.pyc
deleted file mode 100644
index 259b394..0000000
--- a/Testcases/cfgm_common/dependency_tracker.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/exceptions.py b/Testcases/cfgm_common/exceptions.py
deleted file mode 100644
index d9723a4..0000000
--- a/Testcases/cfgm_common/exceptions.py
+++ /dev/null
@@ -1,133 +0,0 @@
-#
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-# Base class of all exceptions in VNC
-
-
-class VncError(Exception):
- pass
-# end class VncError
-
-class ServiceUnavailableError(VncError):
- def __init__(self, code):
- self._reason_code = code
- # end __init__
-
- def __str__(self):
- return 'Service unavailable time out due to: %s' % (str(self._reason_code))
- # end __str__
-# end class ServiceUnavailableError
-
-class TimeOutError(VncError):
- def __init__(self, code):
- self._reason_code = code
- # end __init__
-
- def __str__(self):
- return 'Timed out due to: %s' % (str(self._reason_code))
- # end __str__
-# end class TimeOutError
-
-
-class BadRequest(Exception):
- def __init__(self, status_code, content):
- self.status_code = status_code
- self.content = content
- # end __init__
-
- def __str__(self):
- return 'HTTP Status: %s Content: %s' % (self.status_code, self.content)
- # end __str__
-# end class BadRequest
-
-
-class NoIdError(VncError):
-
- def __init__(self, unknown_id):
- self._unknown_id = unknown_id
- # end __init__
-
- def __str__(self):
- return 'Unknown id: %s' % (self._unknown_id)
- # end __str__
-# end class NoIdError
-
-
-class MaxRabbitPendingError(VncError):
-
- def __init__(self, npending):
- self._npending = npending
- # end __init__
-
- def __str__(self):
- return 'Too many pending updates to RabbitMQ: %s' % (self._npending)
- # end __str__
-# end class MaxRabbitPendingError
-
-class ResourceExistsError(VncError):
- def __init__(self, eexists_fq_name, eexists_id):
- self._eexists_fq_name = eexists_fq_name
- self._eexists_id = eexists_id
- # end __init__
-
- def __str__(self):
- return 'FQ Name: %s exists already with ID: %s' \
- % (self._eexists_fq_name, self._eexists_id)
- # end __str__
-# end class ResourceExistsError
-
-class ResourceTypeUnknownError(VncError):
- def __init__(self, obj_type):
- self._unknown_type = obj_type
- # end __init__
-
- def __str__(self):
- return 'Unknown object type: %s' %(self._unknown_type)
- # end __str__
-# end class ResourceTypeUnknownError
-
-class PermissionDenied(VncError):
- pass
-# end class PermissionDenied
-
-
-class RefsExistError(VncError):
- pass
-# end class RefsExistError
-
-
-class ResourceExhaustionError(VncError):
- pass
-# end class ResourceExhaustionError
-
-
-class NoUserAgentKey(VncError):
- pass
-# end class NoUserAgentKey
-
-
-class UnknownAuthMethod(VncError):
- pass
-# end class UnknownAuthMethod
-
-
-class HttpError(VncError):
-
- def __init__(self, status_code, content):
- self.status_code = status_code
- self.content = content
- # end __init__
-
- def __str__(self):
- return 'HTTP Status: %s Content: %s' % (self.status_code, self.content)
- # end __str__
-# end class HttpError
-
-
-class AmbiguousParentError(VncError):
- pass
-
-
-class InvalidSessionID(VncError):
- pass
-# end InvalidSessionID
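
Callers of the deleted modules generally branch on these exception classes rather than on raw status codes. A hedged sketch of a delete helper that tolerates an already-removed object but surfaces dangling references; the vnc_api client methods named here are illustrative, not defined in this file:

    from cfgm_common.exceptions import NoIdError, RefsExistError

    def delete_if_present(api, fq_name):
        try:
            uuid = api.fq_name_to_id('virtual-network', fq_name)
            api.virtual_network_delete(id=uuid)
        except NoIdError:
            pass      # object is already gone, nothing to do
        except RefsExistError:
            raise     # still referenced elsewhere, let the caller decide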
diff --git a/Testcases/cfgm_common/exceptions.pyc b/Testcases/cfgm_common/exceptions.pyc
deleted file mode 100644
index 57f9545..0000000
--- a/Testcases/cfgm_common/exceptions.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/ifmap/__init__.py b/Testcases/cfgm_common/ifmap/__init__.py
deleted file mode 100644
index 189d225..0000000
--- a/Testcases/cfgm_common/ifmap/__init__.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2011, Infoblox, All Rights Reserved
-#
-# Open Source, see LICENSE
-#
-
-"""
-ifmap-python client is an implementation of the TCG IF-MAP 2.0 protocol as a client library.
-"""
-
-import sys
-
-#
-# Project properties
-#
-
-__version__ = '0.1'
-__build__=""
-
-#
-# Exceptions
-#
-class Error(Exception):
- """
- Base class for exception handling
- """
- def __init__(self, msg):
- Exception.__init__(self, "Error: '%s'" % msg)
diff --git a/Testcases/cfgm_common/ifmap/__init__.pyc b/Testcases/cfgm_common/ifmap/__init__.pyc
deleted file mode 100644
index 263aa50..0000000
--- a/Testcases/cfgm_common/ifmap/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/ifmap/client.py b/Testcases/cfgm_common/ifmap/client.py
deleted file mode 100644
index d4d4df8..0000000
--- a/Testcases/cfgm_common/ifmap/client.py
+++ /dev/null
@@ -1,263 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2011, Infoblox, All Rights Reserved
-#
-# Open Source, see LICENSE
-#
-
-from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
-import gevent
-import geventhttpclient
-from geventhttpclient import HTTPClient
-
-import urllib
-
-import base64
-import cStringIO
-import sys
-
-
-from logging import getLogger
-
-log = getLogger(__name__) # when imported, the logger will be named "ifmap.client"
-
-# Import either httplib2 or urllib2 and map to same name
-try:
- import httplib2 as http_client_lib
- Http = http_client_lib.Http
- HttpException = http_client_lib.HttpLib2Error
-except ImportError:
- import urllib2 as http_client_lib
- HttpException = (http_client_lib.URLError, http_client_lib.HTTPError)
- class Http(): # wrapper to use when httplib2 not available
- def request(self, url, method, body, headers):
- f = http_client_lib.urlopen(http_client_lib.Request(url, body, headers))
- return f.info(), f.read()
-
-#import urllib2 as http_client_lib
-#class Http(): # wrapper to use when httplib2 not available
-# def request(self, url, method, body, headers):
-# f = http_client_lib.urlopen(http_client_lib.Request(url, body, headers))
-# return f.info(), f.read()
-
-namespaces = {
- 'env' : "http://www.w3.org/2003/05/soap-envelope",
- 'ifmap' : "http://www.trustedcomputinggroup.org/2010/IFMAP/2",
- 'meta' : "http://www.trustedcomputinggroup.org/2010/IFMAP-METADATA/2"
-}
-
-# NOTE(sahid): It seems that the geventhttpclient uses gevent.queue.LifoQueue
-# to maintain a pool of connections and according to the doc it is possible
-# to configure the maxsize of the queue with None or a value less than 0 to
-# set the number of connections ulimited otherwise It is actually not possible
-# to set it to None or less than 0 since lock.BoundedSemaphore will return an
-# exception. https://github.com/gwik/geventhttpclient/blob/master/src/geventhttpclient/connectionpool.py#L37
-concurrency = 1 # arbitrary value since it is not possible to use ulimited.
-
-class AsyncReadWrapper(object):
- """ Perform the socket read in a separate greenlet """
- def __init__(self, request):
- self._greenlet = gevent.spawn(self.AsyncRead, request)
- self._content = None
-
- def AsyncRead(self, request):
- self._content = request.read()
-
- def __str__(self, *args, **kwargs):
- self._greenlet.join()
- return self._content
-
- def __repr__(self, *args, **kwargs):
- self._greenlet.join()
- return self._content
-
-class client:
- """
- IF-MAP client
- """
-
- __url = None
- __session_id = None
- __publisher_id = None
- __last_sent = None
- __last_received = None
- __namespaces = None
- __ssl_options = {
- 'cert_reqs' : gevent.ssl.CERT_NONE,
- 'ssl_version' : PROTOCOL_SSLv23,
- }
- if sys.version_info >= (2,7):
- __ssl_options['ciphers'] = "RC4-SHA"
-
- __envelope ="""<?xml version="1.0" encoding="UTF-8"?>
-<env:Envelope xmlns:env="http://www.w3.org/2003/05/soap-envelope" %(ns)s>
- <env:Body>
- %(body)s
- </env:Body>
-</env:Envelope>
-"""
-
- def __init__(self, url, user=None, password=None, namespaces={}, ssl_opts=None):
- if user and password:
-# self.__password_mgr=http_client_lib.HTTPPasswordMgrWithDefaultRealm()
-# self.__password_mgr.add_password(None, url, user, password)
-# handler = http_client_lib.HTTPBasicAuthHandler(self.__password_mgr)
-# opener = http_client_lib.build_opener(handler)
-# http_client_lib.install_opener(opener)
-
- #pycurl.global_init(pycurl.GLOBAL_SSL)
-
- pass
-
- #if namespaces:
- self.__namespaces = namespaces
- if ssl_opts:
- self.__ssl_options.update(ssl_opts)
-
- self.__url = url
- self.__username = user
- self.__password = password
- try:
- self._http = HTTPClient(*self.__url, ssl = True,
- connection_timeout = None,
- network_timeout = None,
- ssl_options = self.__ssl_options,
- insecure = True,
- concurrency = concurrency)
- except TypeError:
- self._http = HTTPClient(*self.__url, ssl = True,
- connection_timeout = None,
- network_timeout = None,
- ssl_options = self.__ssl_options,
- concurrency = concurrency)
-
-
- def last_sent(self):
- return self.__last_sent
-
- def last_received(self):
- return self.__last_received
-
- def envelope(self, body) :
- _ns = ""
- for ns_prefix, ns_uri in self.__namespaces.items():
- #if ns_prefix == "env": break # don't add the envelope namespace again
- if ns_prefix == "env": continue # don't add the envelope namespace again
- _ns += "xmlns:"+ns_prefix+'="'+ns_uri+'" '
- return str(self.__envelope % {'body':body, 'ns': _ns})
-
- def call(self, method, body):
- xml = self.envelope(body)
- #headers={
- # 'Content-type': 'text/xml; charset="UTF-8"',
- # 'Content-length': str(len(xml)),
- # "SOAPAction": '"%s"' % (method),
- #}
-
- base64string = base64.encodestring('%s:%s' % (self.__username, self.__password)).replace('\n', '')
- # pycurl
- #headers=[
- # 'Content-type: text/xml; charset="UTF-8"',
- # 'Content-length: %s' %(str(len(xml))),
- # 'Authorization : Basic %s' %(base64string),
- # 'SOAPAction: %s' % (method),
- #]
-
- # geventhttp
- headers={
- 'Content-type': 'text/xml; charset="UTF-8"',
- 'Content-length': '%s' %(str(len(xml))),
- 'Authorization': 'Basic %s' %(base64string),
- 'SOAPAction': '%s' % (method),
- }
-
- try:
- log.info("sending IF-MAP message to server")
- log.debug("======== sending IF-MAP message ========")
- log.debug("\n%s\n", xml)
- log.debug("======== /sending IF-MAP message ========")
-
- #response, content = self.http.request(self.__url,"POST", body=xml, headers=headers )
-
- #self.http = pycurl.Curl()
- #self.http.setopt(pycurl.URL, self.__url)
- #self.http.setopt(pycurl.HTTPHEADER, headers)
- #self.http.setopt(pycurl.POSTFIELDS, xml)
- #self.http.setopt(pycurl.VERBOSE, True)
- #self.http.setopt(pycurl.SSL_VERIFYPEER, 0)
- #self.http.setopt(pycurl.SSL_VERIFYHOST, 0)
- #content = cStringIO.StringIO()
- #self.http.setopt(pycurl.WRITEFUNCTION,
- # content.write)
- #self.http.perform()
-
- #self.http = HTTPClient(*self.__url, ssl = True,
- # ssl_options = {'cert_reqs': gevent.ssl.CERT_NONE,
- # 'ssl_version': PROTOCOL_SSLv3})
- #response = self.http.post('/', body = xml, headers = headers)
- response = self._http.post('/', body = xml, headers = headers)
- content = response.read()
-
- self.__last_sent = xml
-
- #self.__last_received = content
- #pycurl self.__last_received = content.getvalue()
- self.__last_received = content
-
- log.debug("======== received IF-MAP response ========")
- #log.debug("\n%s\n", content)
- #pycurl log.debug("\n%s\n", content.getvalue())
- log.debug("\n%s\n", content)
- log.debug("======== /receive IF-MAP response ========")
-
- #return content
- #pycurl return content.getvalue()
- return content
-
- except HttpException, e:
- log.error("HTTP Connection error in IF-MAP client: %s", e.reason)
- except Exception as e:
- log.error("Uknown error sending IF-MAP message to server %s", str(e))
- raise
-
- def call_async_result(self, method, body):
- xml = self.envelope(body)
- base64string = base64.encodestring('%s:%s' % (self.__username, self.__password)).replace('\n', '')
-
- # geventhttp
- headers={
- 'Content-type': 'text/xml; charset="UTF-8"',
- 'Content-length': '%s' %(str(len(xml))),
- 'Authorization': 'Basic %s' %(base64string),
- 'SOAPAction': '%s' % (method),
- }
-
- try:
- response = self._http.post('/', body = xml, headers = headers)
- content = AsyncReadWrapper(response)
-
- return content
-
- except HttpException, e:
- log.error("HTTP Connection error in IF-MAP client: %s", e.reason)
- except:
- log.error("Uknown error sending IF-MAP message to server")
- raise
-
- def set_session_id(self, session_id):
- self.__session_id = session_id
-
- def set_publisher_id(self, publisher_id):
- self.__publisher_id = publisher_id
-
- def get_session_id(self):
- return self.__session_id
-
- def get_publisher_id(self):
- return self.__publisher_id
-
-
-if __name__ == "__main__":
- print """The ifmap client library is not meant to be run from the command line or python interpreter
-- you should use it by including it in your python software. See testmap.py for an example.
-Hint: add this line to use the library - 'from ifmap import ifmapClient' """
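
The url argument above is unpacked straight into geventhttpclient.HTTPClient, so callers pass a (host, port) tuple rather than a URL string. A minimal session bootstrap, with assumed host, port, and credentials:

    from cfgm_common.ifmap.client import client, namespaces
    from cfgm_common.ifmap.request import NewSessionRequest
    from cfgm_common.ifmap.response import newSessionResult

    mapclient = client(('127.0.0.1', 8443), 'control', 'secret', namespaces)

    # newSession returns the session-id and ifmap-publisher-id used by all
    # subsequent publish/search/poll calls.
    result = mapclient.call('newSession', str(NewSessionRequest()))
    session = newSessionResult(result)
    mapclient.set_session_id(session.get_session_id())
    mapclient.set_publisher_id(session.get_publisher_id())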
diff --git a/Testcases/cfgm_common/ifmap/client.pyc b/Testcases/cfgm_common/ifmap/client.pyc
deleted file mode 100644
index 3670f20..0000000
--- a/Testcases/cfgm_common/ifmap/client.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/ifmap/id.py b/Testcases/cfgm_common/ifmap/id.py
deleted file mode 100644
index 7a71d51..0000000
--- a/Testcases/cfgm_common/ifmap/id.py
+++ /dev/null
@@ -1,191 +0,0 @@
-#
-# Copyright 2011, Infoblox, All Rights Reserved
-#
-# Open Source, see LICENSE
-
-# Module with ID factories for creating IF-MAP Identifiers.
-# Identifiers are used, for example, when publishing to an IF-MAP server, to represent an IP address.
-# The XML for such the IP address identifier would be generated by ifmap.id.IPAddress
-# example:
-# >>> print ifmap.id.IPAdress('10.0.0.1')
-
-from util import attr
-
-class ifmapIDFactory:
- pass
-
-
-class IPAddress(ifmapIDFactory):
- """
- XML Factory for an IP Address IF-MAP Identifier
- """
- def __init__(self, ip_address, type=None, administrative_domain=None):
- self.__ip_address = ip_address
- self.__type = type
- self.__administrative_domain = administrative_domain
-
- def administrative_domain(self):
- return self.__administrative_domain
-
- def ip_address(self):
- return self.__ip_address
-
- def type(self):
- return self.__type
-
- def __str__(self):
- _attr = attr({'value':self.__ip_address,'type':self.__type,'administrative-domain':self.__administrative_domain})
- return '<ip-address %s' % _attr + '/>'
-
-class MACAddress(ifmapIDFactory):
- """
- XML Factory for a MAC Address IF-MAP Identifier
- """
-
- def __init__(self, mac_address, administrative_domain=None):
- self.__mac_address = mac_address
- self.__administrative_domain = administrative_domain
- return None;
-
- def administrative_domain(self):
- return self.__administrative_domain
-
- def mac_address(self):
- return self.__mac_address
-
- def __str__(self):
- _attr = attr({'value':self.__mac_address,'administrative-domain':self.__administrative_domain})
- return '<mac-address %s' % _attr + '/>'
-
-
-class Device(ifmapIDFactory):
- """
- XML Factory for a Device IF-MAP Identifier
- """
-
- def __init__(self, name, aik_name=None):
- self.__name = name
- self.__aik_name = aik_name
- return None;
-
- def aik_name(self):
- return self.__aik_name
-
- def name(self):
- return self.__name
-
- def __str__(self):
- self.__XML = "<device>"
- self.__XML += '<name>'+self.__name+'</name>'
- # aik_name is optional
- if self.__aik_name:
- self.__XML += '<aik-name>'+self.__aik_name+'<aik-name>'
- self.__XML += "</device>"
- return self.__XML
-
-class AccessRequest(ifmapIDFactory):
- """
- XML Factory for an Access Request IF-MAP Identifier
- """
-
- def __init__(self, name, administrative_domain=None):
- self.__name = name
- self.__administrative_domain = administrative_domain
- return None;
-
- def administrative_domain(self):
- return self.__administrative_domain
-
- def name(self):
- return self.__name
-
- def __str__(self):
- self.__XML = "<access-request"
- self.__XML += ' name="'+self.__name+'"'
- # administrative_domain is optional
- if self.__administrative_domain:
- self.__XML += ' administrative-domain="'+self.__administrative_domain+'"'
- self.__XML += " />"
- return self.__XML
-
-class Identity(ifmapIDFactory):
- """
- XML Factory for an IF-MAP Identifier
- """
-
- def __init__(self, name, type=None, other_type=None, administrative_domain=None):
- self.__name = name # required
- self.__type = type # "aik_name"|"distinguished_name"|"dns_name"|"email_address"|"kerberos_principal"|"username"|"sip_uri"|"tel_uri"|"hip_hit"|"other"
- self.__other_type = other_type # vendor-specific type
- self.__administrative_domain = administrative_domain
- return None;
-
- def administrative_domain(self):
- return self.__administrative_domain
-
- def name(self):
- return self.__name
-
- def type(self):
- return self.__type
-
- def other_type(self):
- return self.__other_type
-
- def __str__(self):
- self.__XML = "<identity"
- self.__XML += ' name="'+self.__name+'"'
- # type and administrative_domain are optional
- if self.__type:
- self.__XML +=' type="'+self.__type+'"'
- if self.__other_type:
- self.__XML +=' other-type-definition="'+self.__other_type+'"'
- if self.__administrative_domain:
- self.__XML += ' administrative-domain="'+self.__administrative_domain+'"'
- self.__XML += " />"
- return self.__XML
-
-
-class CustomIdentity(ifmapIDFactory):
- """
- XML Factory for an Custom IF-MAP Identifier with namespace prefix or URL
- """
-
- def __init__(self, name, ns_prefix="", namespace="", attributes=None):
- self.__name = name # required
- self.__ns_prefix = ns_prefix # see ifmap.namespaces
- self.__namespace = namespace # a namespace IRI/URI
- self.__attributes = attributes # additional attributes in a dictionary (eg. {key1: value1, key2: value2})
- return None;
-
- def attributes(self):
- return self.__attributes
-
- def name(self):
- return self.__name
-
- def ns_prefix(self):
- return self.__ns_prefix
-
- def namespace(self):
- return self.__namespace
-
- def __str__(self):
- self.__XML = "<custom-identifier>"
-
-
- if self.__ns_prefix:
- self.__ns_prefix = self.__ns_prefix +':'
-
- self.__XML += '<'+self.__ns_prefix+self.__name
-
- if self.__namespace:
- self.__namespace=' xlmns='+self.__ns_prefix+self.__namespace
-
- self.__XML += self.__namespace
-
- if self.__attributes and (type(self.__attributes) == type({})) and self.__attributes.items():
- for key, attribute in self.__attributes.items():
- self.__XML += ' '+key+'="'+attribute+'"'
- self.__XML += " /></custom-identifier>"
- return self.__XML
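
Each identifier factory's __str__ emits the XML fragment that is later embedded in publish and search requests, and attributes left at None are simply omitted. A short sketch; the identifier names are illustrative and the attribute ordering may vary because the attributes are rendered from a dict:

    from cfgm_common.ifmap.id import IPAddress, Identity

    print(IPAddress('10.0.0.1', type='IPv4'))
    # <ip-address value="10.0.0.1" type="IPv4" />

    print(Identity('default-domain:default-project:net1',
                   type='other', other_type='extended'))
    # <identity name="default-domain:default-project:net1" type="other" other-type-definition="extended" />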
diff --git a/Testcases/cfgm_common/ifmap/id.pyc b/Testcases/cfgm_common/ifmap/id.pyc
deleted file mode 100644
index ed8e87f..0000000
--- a/Testcases/cfgm_common/ifmap/id.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/ifmap/metadata.py b/Testcases/cfgm_common/ifmap/metadata.py
deleted file mode 100644
index 17f4515..0000000
--- a/Testcases/cfgm_common/ifmap/metadata.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2011, Infoblox, All Rights Reserved
-#
-# Open Source, see LICENSE
-#
-from util import attr, link_ids
-
-class MetadataBase:
- """ foundation class for metadata factory """
- pass
-
-class Metadata(MetadataBase):
- """
- Metadata factory
- """
- __ns_uri = ''
-
- def __init__(self, name, value=None, attributes=None, ns_prefix=None, ns_uri=None, elements=''):
- self.__value = value
- self.__attributes = attributes
- self.__elements = elements
-
- if ns_prefix:
- self.__name = ns_prefix + ':' + name
- elif not ns_uri:
- self.__name = 'meta:' + name
-
- if ns_uri:
- if ns_prefix:
- self.__ns_uri = ' xmlns:' + ns_prefix + '="' + ns_uri + '"'
- else:
- self.__ns_uri = ' xmlns="' + ns_uri + '"'
-
- def __str__(self):
- __attr = ' '+ attr(self.__attributes)
- return '<metadata><' + self.__name + self.__ns_uri + __attr + '>' + self.__value + self.__elements + '</' + self.__name + '></metadata>'
-
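
In the factory above, value defaults to None but is concatenated without a guard in __str__, so callers pass at least an empty string. A short sketch with an illustrative metadata name:

    from cfgm_common.ifmap.metadata import Metadata

    meta = Metadata('connection', '',
                    {'ifmap-cardinality': 'singleValue'},
                    ns_prefix='contrail')
    print(meta)
    # <metadata><contrail:connection ifmap-cardinality="singleValue" ></contrail:connection></metadata>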
diff --git a/Testcases/cfgm_common/ifmap/metadata.pyc b/Testcases/cfgm_common/ifmap/metadata.pyc
deleted file mode 100644
index 7c4ec60..0000000
--- a/Testcases/cfgm_common/ifmap/metadata.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/ifmap/operations.py b/Testcases/cfgm_common/ifmap/operations.py
deleted file mode 100644
index c4c2055..0000000
--- a/Testcases/cfgm_common/ifmap/operations.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright 2011, Infoblox, All Rights Reserved
-#
-# Open Source, see LICENSE
-#
-from util import attr, link_ids
-
-class OperationBase:
- """ foundation class for operation factory """
- pass
-
-class PublishUpdateOperation(OperationBase):
- def __init__(self, id1, metadata, id2=None, lifetime=None):
- self.__id = link_ids(id1, id2)
- self.__metadata = metadata
- self.__lifetime = lifetime
-
- def __str__(self):
- if self.__lifetime:
- _attr = attr({'lifetime':self.__lifetime})
- return '<update %s>' % _attr + self.__id + self.__metadata + '</update>'
- else:
- return '<update>' + self.__id + self.__metadata + '</update>'
-
-class PublishDeleteOperation(OperationBase):
- def __init__(self, id1, id2=None, filter=None):
- self.__id = link_ids(id1, id2)
- self.__filter = filter
-
- def __str__(self):
- if self.__filter:
- _attr = attr({'filter':self.__filter})
- return '<delete %s>' % _attr + self.__id + '</delete>'
- else:
- return '<delete>' + self.__id + '</delete>'
-
-class PublishNotifyOperation(OperationBase):
- def __init__(self, id1, metadata, id2=None):
- self.__id = link_ids(id1, id2)
- self.__metadata = metadata
-
- def __str__(self):
- return '<notify>' + self.__id + self.__metadata + '</notify>'
-
-class SubscribeUpdateOperation(OperationBase):
- """
- SubscribeUpdate factory
- name
- identifier (single, or linked with link_ids())
- search_parameters - dictionary eg. {'max_depth':'3', 'max_size':'10000'}
- result_filter => string, #Optional. Rules for extracting specific data from the results
- match_links => string, #Optional. Filter to match links to be followed, unmatched links will not be followed in the search process
- max_depth => number, #Optional. Maximum distance of any included identifiers. Start depth is equal to 0
- max_size => number, #Optional. Maximum size in bytes of the results
- terminal_identifier_type => string, #Optional. Terminal identifier type of the search request
- """
- def __init__(self, name, identifier, search_parameters={}):
- self.__name = name
- self.__identifier = identifier
- self.__parameters = search_parameters
-
- def __str__(self):
- __attr = attr(self.__parameters)
- return '<update name="'+ self.__name + '" ' + __attr + '>' + self.__identifier +'</update>'
-
-class SubscribeDeleteOperation(OperationBase):
- def __init__(self, name):
- self.__name = name
-
- def __str__(self):
- return '<delete name="'+ self.__name + '" />'
-
-
-
-
- \ No newline at end of file
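
These operation factories produce the <update>, <delete>, and <notify> bodies that a publish request wraps. A sketch combining an identifier and a metadata fragment into a single update, with illustrative values:

    from cfgm_common.ifmap.id import Identity
    from cfgm_common.ifmap.metadata import Metadata
    from cfgm_common.ifmap.operations import PublishUpdateOperation

    ident = Identity('default-domain:default-project:net1',
                     type='other', other_type='extended')
    meta = Metadata('id-perms', '', {'ifmap-cardinality': 'singleValue'},
                    ns_prefix='contrail')

    update = PublishUpdateOperation(id1=str(ident), metadata=str(meta),
                                    lifetime='forever')
    print(update)
    # <update lifetime="forever"> ...identifier... ...metadata... </update>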
diff --git a/Testcases/cfgm_common/ifmap/operations.pyc b/Testcases/cfgm_common/ifmap/operations.pyc
deleted file mode 100644
index d07368a..0000000
--- a/Testcases/cfgm_common/ifmap/operations.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/ifmap/request.py b/Testcases/cfgm_common/ifmap/request.py
deleted file mode 100644
index 47bc1f6..0000000
--- a/Testcases/cfgm_common/ifmap/request.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2011, Infoblox, All Rights Reserved
-#
-# Open Source, see LICENSE
-#
-from util import attr
-
-class RequestBase:
- """ foundation class for request factory """
- pass
-
-class NewSessionRequest(RequestBase):
- def __init__(self, max_poll_result=None):
- self.__max_poll_result = max_poll_result
-
- def __str__(self):
- #import pdb; pdb.set_trace()
- return '<ifmap:newSession %s' % (attr({'max-poll-result-size':self.__max_poll_result})) + '/>';
-
-class RenewSessionRequest(RequestBase):
- def __init__(self, session_id):
- self.__session_id = session_id
-
- def __str__(self):
- return '<ifmap:renewSession %s' % (attr({'session-id':self.__session_id})) + '/>';
-
-class EndSessionRequest(RequestBase):
- def __init__(self, session_id):
- self.__session_id = session_id
-
- def __str__(self):
- return '<ifmap:endSession %s' % (attr({'session-id':self.__session_id})) + '/>';
-
-class PublishRequest(RequestBase):
- __session_id = None
- def __init__(self, session_id, operations, namespaces=None, validation=None):
- self.__session_id = session_id
- self.__namespaces = namespaces
- self.__validation = validation
- self.__operations = operations
-
- def __str__(self):
- _attr = attr({'session-id': self.__session_id, 'validation' : self.__validation})
- return '<ifmap:publish %s' % _attr + '>' + self.__operations + '</ifmap:publish>'
-
-class SearchRequest(RequestBase):
- """
- Search request factory
- session_id
- identifier (single, or linked with link_ids())
- namespaces
- validation "None"|"BaseOnly"|"MetadataOnly"|"All"
- search_parameters - dictionary eg. {'max_depth':'3', 'max_size':'10000'}
- result_filter => string, #Optional. Rules for extracting specific data from the results
- match_links => string, #Optional. Filter to match links to be followed, unmatched links will not be followed in the search process
- max_depth => number, #Optional. Maximum distance of any included identifiers. Start depth is equal to 0
- max_size => number, #Optional. Maximum size in bytes of the results
- terminal_identifier_type => string, #Optional. Terminal identifier type of the search request
- """
- def __init__(self, session_id, identifier, namespaces=None, validation=None, search_parameters={}):
- self.__session_id = session_id
- self.__identifier = identifier
- self.__namespaces = namespaces
- self.__validation = validation
- self.__parameters = search_parameters
-
- def __str__(self):
- _params = attr(self.__parameters)
- _attr = attr({'session-id': self.__session_id, 'validation' : self.__validation})
- return '<ifmap:search ' + _attr + _params + '>' + self.__identifier + '</ifmap:search>'
-
-class SubscribeRequest(RequestBase):
- """
- Subscribe request factory
- """
-
- def __init__(self, session_id, validation=None, namespaces=None, operations=None):
- self.__session_id = session_id
- self.__namespaces = namespaces
- self.__validation = validation
- self.__operations = operations
-
- def __str__(self):
- _attr = attr({'session-id': self.__session_id, 'validation' : self.__validation})
- return '<ifmap:subscribe %s' % _attr + '>' + self.__operations + '</ifmap:subscribe>'
-
-class PollRequest(RequestBase):
- def __init__(self, session_id, validation=None, namespaces=None):
- self.__session_id = session_id
- self.__namespaces = namespaces
- self.__validation = validation
-
- def __str__(self):
- _attr = attr({'session-id': self.__session_id, 'validation' : self.__validation})
- return '<ifmap:poll %s' % _attr + '/>'
-
-class PurgeRequest(RequestBase):
- def __init__(self, session_id, publisher_id=None, validation=None):
- self.__session_id = session_id
- self.__publisher_id = publisher_id
- self.__validation = validation
-
- def __str__(self):
- __attr = attr({'session-id':self.__session_id, 'validation':self.__validation,'ifmap-publisher-id':self.__publisher_id})
- return '<ifmap:purgePublisher %s' % __attr + '/>';
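
Each request class only renders the outer ifmap:* element around a session id and a pre-rendered operation body; the result is what client.call() wraps in the SOAP envelope. For example, with a made-up session id and placeholder operation body:

    from cfgm_common.ifmap.request import PublishRequest

    publish = PublishRequest('1234-abcd', '<update lifetime="forever">...</update>')
    print(publish)
    # <ifmap:publish session-id="1234-abcd" ><update lifetime="forever">...</update></ifmap:publish>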
diff --git a/Testcases/cfgm_common/ifmap/request.pyc b/Testcases/cfgm_common/ifmap/request.pyc
deleted file mode 100644
index 94537b5..0000000
--- a/Testcases/cfgm_common/ifmap/request.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/ifmap/response.py b/Testcases/cfgm_common/ifmap/response.py
deleted file mode 100644
index 179fd01..0000000
--- a/Testcases/cfgm_common/ifmap/response.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2011, Infoblox, All Rights Reserved
-#
-# Open Source, see LICENSE
-#
-
-from xml.etree import ElementTree
-
-class Response():
- """
- Base class to handle and parse IF-MAP responses
- """
- __xml = ""
-
- def __init__(self, result):
- """
- Take a result string and process it
- """
- if result:
- env = ElementTree.fromstring(result)
- body = env.find('{http://www.w3.org/2003/05/soap-envelope}Body')
- response = body.find('{http://www.trustedcomputinggroup.org/2010/IFMAP/2}response')
- # xml.etree.ElementTree find is broken in python 2.6
- children = response.findall('*')
- if len(children):
- self.__xml = children[0]
-
- def element(self):
- """
- Returns the raw Element object
- """
- return self.__xml
-
- def __str__(self):
- """
- Print the XML tree as a string
- """
- return ElementTree.tostring(self.__xml)
-
-class newSessionResult(Response):
- """
- newSessionResult
- """
- def __init__(self, result):
- #import pdb; pdb.set_trace()
- self.__newSession = Response(result).element()
-
- def get_session_id(self):
- return self.__newSession.attrib['session-id']
-
- def get_publisher_id(self):
- return self.__newSession.attrib['ifmap-publisher-id']
-
-
-
-
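
As a rough illustration of the parsing path in Response/newSessionResult above, this standalone sketch feeds a hand-written newSession SOAP envelope through the same ElementTree calls; the session and publisher ids are invented example values, not real server output.

from xml.etree import ElementTree

SAMPLE = (
    '<env:Envelope xmlns:env="http://www.w3.org/2003/05/soap-envelope" '
    'xmlns:ifmap="http://www.trustedcomputinggroup.org/2010/IFMAP/2">'
    '<env:Body><ifmap:response>'
    '<newSessionResult session-id="2345" ifmap-publisher-id="test--0001"/>'
    '</ifmap:response></env:Body></env:Envelope>'
)

env = ElementTree.fromstring(SAMPLE)
body = env.find('{http://www.w3.org/2003/05/soap-envelope}Body')
response = body.find('{http://www.trustedcomputinggroup.org/2010/IFMAP/2}response')
result = response.findall('*')[0]           # first child, as Response.__init__ does
print(result.attrib['session-id'])          # 2345
print(result.attrib['ifmap-publisher-id'])  # test--0001
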
diff --git a/Testcases/cfgm_common/ifmap/response.pyc b/Testcases/cfgm_common/ifmap/response.pyc
deleted file mode 100644
index 93710a8..0000000
--- a/Testcases/cfgm_common/ifmap/response.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/ifmap/util.py b/Testcases/cfgm_common/ifmap/util.py
deleted file mode 100644
index e4d06dd..0000000
--- a/Testcases/cfgm_common/ifmap/util.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2011, Infoblox, All Rights Reserved
-#
-# Open Source, see LICENSE
-#
-
-def attr(attributes):
- """
- attr creates an XML string for any attribute that has a value
- attr({'session-id': '2345', 'validation':'metadata'})
- """
- if attributes and (type(attributes) == type({})): # check if it is a dictionary
- __xml = ""
- for label, value in attributes.items():
- if value is not None:
- __xml += label + '="' + value + '" '
- return __xml
- else:
- return ''
-
-def link_ids(id1, id2):
- """
- Takes two id arguments.
- Returns XML for id1 or links id1 and id2 together
- """
- if id1 and id2: # Both exist, so link them
- #return '<link>' + id1 + id2 + '</link>'
- return id1 + id2
- else:
- return id1
-
-
-
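
A quick self-contained illustration of link_ids() above; attr() is exercised in the request sketch earlier in this diff, and note that it silently skips None-valued attributes. The identifier strings here are placeholders.

def link_ids(id1, id2):
    # Copy of the helper above: concatenates two identifier strings when both exist.
    if id1 and id2:
        return id1 + id2
    return id1

print(link_ids('<id-A/>', None))       # <id-A/>
print(link_ids('<id-A/>', '<id-B/>'))  # <id-A/><id-B/>
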
diff --git a/Testcases/cfgm_common/ifmap/util.pyc b/Testcases/cfgm_common/ifmap/util.pyc
deleted file mode 100644
index bb357e3..0000000
--- a/Testcases/cfgm_common/ifmap/util.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/imid.py b/Testcases/cfgm_common/imid.py
deleted file mode 100644
index 432674d..0000000
--- a/Testcases/cfgm_common/imid.py
+++ /dev/null
@@ -1,344 +0,0 @@
-#
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-# This file deals with the ifmap id handling for both vnc-user-visible entities
-# and bgp-visible entities
-
-import uuid
-import re
-import ssl
-import StringIO
-from lxml import etree
-from cfgm_common import exceptions
-from cfgm_common.ifmap.client import client
-from ifmap.request import NewSessionRequest, RenewSessionRequest, \
- EndSessionRequest, PublishRequest, SearchRequest, \
- SubscribeRequest, PurgeRequest, PollRequest
-from ifmap.id import IPAddress, MACAddress, Device, AccessRequest, Identity, \
- CustomIdentity
-from ifmap.operations import PublishUpdateOperation, PublishNotifyOperation, \
- PublishDeleteOperation, SubscribeUpdateOperation,\
- SubscribeDeleteOperation
-from ifmap.util import attr, link_ids
-from ifmap.response import Response, newSessionResult
-from ifmap.metadata import Metadata
-from xml.sax.saxutils import escape as s_esc, unescape as s_unesc
-
-
-_TENANT_GRP = "(?P<tenant_uuid>.*)"
-_VPC_GRP = "(?P<vpc_name>.*)"
-_VN_GRP = "(?P<vn_name>.*)"
-_SG_GRP = "(?P<sg_name>.*)"
-_POL_GRP = "(?P<pol_name>.*)"
-_INST_GRP = "(?P<instance_uuid>.*)"
-_PORT_GRP = "(?P<port_id>.*)"
-
-_TENANT_ID_RE = "contrail:tenant:%s" % (_TENANT_GRP)
-_VPC_NAME_RE = "contrail:network-group:%s:%s" % (_TENANT_GRP, _VPC_GRP)
-_VN_NAME_RE = "contrail:virtual-network:%s:%s:%s" % (
- _TENANT_GRP, _VPC_GRP, _VN_GRP)
-_SG_NAME_RE = "contrail:security-group:%s:%s:%s" % (
- _TENANT_GRP, _VPC_GRP, _SG_GRP)
-_POL_NAME_RE = "contrail:policy:%s:%s:%s" % (_TENANT_GRP, _VPC_GRP, _POL_GRP)
-_INST_ID_RE = "contrail:instance:%s:%s:%s:%s" \
- % (_TENANT_GRP, _VPC_GRP, _VN_GRP, _INST_GRP)
-_PORT_ID_RE = "contrail:port:%s:%s:%s:%s:%s" \
- % (_TENANT_GRP, _VPC_GRP, _VN_GRP, _INST_GRP, _PORT_GRP)
-
-_CT_NS = "contrail"
-_ROOT_IMID = _CT_NS + ":config-root:root"
-
-_SOAP_XSD = "http://www.w3.org/2003/05/soap-envelope"
-_IFMAP_XSD = "http://www.trustedcomputinggroup.org/2010/IFMAP/2"
-_IFMAP_META_XSD = "http://www.trustedcomputinggroup.org/2010/IFMAP-METADATA/2"
-_CONTRAIL_XSD = "http://www.contrailsystems.com/vnc_cfg.xsd"
-
-# Parse ifmap-server returned search results and create list of tuples
-# of (ident-1, ident-2, link-attribs)
-
-
-def parse_result_items(result_items, my_imid=None):
- all_result_list = []
- for r_item in result_items:
- children = r_item.getchildren()
- num_children = len(children)
- if num_children == 1: # ignore ident-only result-items
- continue
- elif num_children == 2:
- result_info = [children[0], None, children[1]]
- elif num_children == 3:
- result_info = [children[0], children[1], children[2]]
- else:
- raise Exception('Result item of length %s not handled!'
- % (num_children))
- all_result_list.append(result_info)
-
- if not my_imid:
- return all_result_list
-
- # strip ones that don't originate from or to my_imid
- filtered_result_list = []
- for (ident_1, ident_2, meta) in all_result_list:
- if (((ident_2 is not None) and (ident_2.attrib['name'] == my_imid)) or
- (ident_1.attrib['name'] == my_imid)):
- if meta is None:
- filtered_result_list.append((ident_1, ident_2, None))
- else:
- # search gives all props under one metadata. expand it.
- for m_elem in meta:
- filtered_result_list.append((ident_1, ident_2, m_elem))
-
- return filtered_result_list
-# end parse_result_items
-
-
-def get_ifmap_id_from_fq_name(type, fq_name):
- my_fqn = ':' + ':'.join(fq_name)
- my_imid = 'contrail:' + type + escape(my_fqn)
-
- return my_imid
-# end get_ifmap_id_from_fq_name
-
-
-def get_type_from_ifmap_id(ifmap_id):
- type = ifmap_id.split(':')[1]
- return type
-# end get_type_from_ifmap_id
-
-
-def get_fq_name_str_from_ifmap_id(ifmap_id):
- return re.sub(r'contrail:.*?:', '', unescape(ifmap_id))
-# end get_fq_name_str_from_ifmap_id
-
-
-def get_fq_name_from_ifmap_id(ifmap_id):
- type = get_type_from_ifmap_id(ifmap_id)
- # route-target has ':' in the name, so handle it as a special case
- if type=='route-target':
- return [':'.join(unescape(ifmap_id).split(':')[2:])]
- return unescape(ifmap_id).split(':')[2:]
-# end get_fq_name_from_ifmap_id
-
-def get_vm_id_from_interface(vmi_obj):
- if vmi_obj.parent_type=='virtual-machine':
- return vmi_obj.parent_uuid
- else:
- vm_refs = vmi_obj.get_virtual_machine_refs()
- return vm_refs[0]['uuid'] if vm_refs else None
-# end get_vm_id_from_interface
-
-def subscribe_root(ssrc_mapc):
- #self._ident_type_subscribe(_CLOUD_IMID, "ct:member-of")
- ident = str(Identity(name=_ROOT_IMID, type="other",
- other_type="extended"))
- subreq = SubscribeRequest(
- ssrc_mapc.get_session_id(),
- operations=str(SubscribeUpdateOperation("root", ident,
- {"max-depth": "255", })))
-
- result = ssrc_mapc.call('subscribe', subreq)
-# end subscribe_root
-
-
-def ssrc_initialize(args):
- ssrc_mapc = ifmap_server_connect(args)
- result = ssrc_mapc.call('newSession', NewSessionRequest())
- ssrc_mapc.set_session_id(newSessionResult(result).get_session_id())
- ssrc_mapc.set_publisher_id(newSessionResult(result).get_publisher_id())
- subscribe_root(ssrc_mapc)
- return ssrc_mapc
-# end ssrc_initialize
-
-
-def arc_initialize(args, ssrc_mapc):
- #
- # Poll requests go on the ARC channel, which doesn't do newSession but
- # shares the session-id with the SSRC channel: 2 connections to the server
- # but 1 session/session-id in ifmap-server (mamma mia!)
- #
- arc_mapc = ifmap_server_connect(args)
- arc_mapc.set_session_id(ssrc_mapc.get_session_id())
- arc_mapc.set_publisher_id(ssrc_mapc.get_publisher_id())
-
- return arc_mapc
-# end arc_initialize
-
-
-def ifmap_server_connect(args):
- _CLIENT_NAMESPACES = {
- 'env': _SOAP_XSD,
- 'ifmap': _IFMAP_XSD,
- 'meta': _IFMAP_META_XSD,
- _CT_NS: _CONTRAIL_XSD
- }
-
- ssl_options = None
- if args.use_certs:
- ssl_options = {
- 'keyfile': args.keyfile,
- 'certfile': args.certfile,
- 'ca_certs': args.ca_certs,
- 'cert_reqs': ssl.CERT_REQUIRED,
- 'ciphers': 'ALL'
- }
- return client(("%s" % (args.ifmap_server_ip),
- "%s" % (args.ifmap_server_port)),
- args.ifmap_username, args.ifmap_password,
- _CLIENT_NAMESPACES, ssl_options)
-# end ifmap_server_connect
-
-
-def parse_poll_result(poll_result_str):
- _XPATH_NAMESPACES = {
- 'a': _SOAP_XSD,
- 'b': _IFMAP_XSD,
- 'c': _CONTRAIL_XSD
- }
-
- soap_doc = etree.parse(StringIO.StringIO(poll_result_str))
- #soap_doc.write(sys.stdout, pretty_print=True)
-
- xpath_error = '/a:Envelope/a:Body/b:response/errorResult'
- error_results = soap_doc.xpath(xpath_error,
- namespaces=_XPATH_NAMESPACES)
-
- if error_results:
- if error_results[0].get('errorCode') == 'InvalidSessionID':
- raise exceptions.InvalidSessionID(etree.tostring(error_results[0]))
- raise Exception(etree.tostring(error_results[0]))
-
- xpath_expr = '/a:Envelope/a:Body/b:response/pollResult'
- poll_results = soap_doc.xpath(xpath_expr,
- namespaces=_XPATH_NAMESPACES)
-
- result_list = []
- for result in poll_results:
- children = result.getchildren()
- for child in children:
- result_type = child.tag
- if result_type == 'errorResult':
- raise Exception(etree.tostring(child))
-
- result_items = child.getchildren()
- item_list = parse_result_items(result_items)
- for item in item_list:
- ident1 = item[0]
- ident2 = item[1]
- meta = item[2]
- idents = {}
- ident1_imid = ident1.attrib['name']
- ident1_type = get_type_from_ifmap_id(ident1_imid)
- idents[ident1_type] = get_fq_name_str_from_ifmap_id(
- ident1_imid)
- if ident2 is not None:
- ident2_imid = ident2.attrib['name']
- ident2_type = get_type_from_ifmap_id(ident2_imid)
- if ident1_type == ident2_type:
- idents[ident1_type] = [
- idents[ident1_type],
- get_fq_name_str_from_ifmap_id(ident2_imid)]
- else:
- idents[ident2_type] = get_fq_name_str_from_ifmap_id(
- ident2_imid)
- result_list.append((result_type, idents, meta))
- return result_list
-# end parse_poll_result
-
-def parse_search_result(search_result_str):
- _XPATH_NAMESPACES = {
- 'a': _SOAP_XSD,
- 'b': _IFMAP_XSD,
- 'c': _CONTRAIL_XSD
- }
-
- soap_doc = etree.parse(StringIO.StringIO(search_result_str))
- #soap_doc.write(sys.stdout, pretty_print=True)
-
- xpath_error = '/a:Envelope/a:Body/b:response/errorResult'
- error_results = soap_doc.xpath(xpath_error,
- namespaces=_XPATH_NAMESPACES)
-
- if error_results:
- if error_results[0].get('errorCode') == 'InvalidSessionID':
- raise exceptions.InvalidSessionID(etree.tostring(error_results[0]))
- raise Exception(etree.tostring(error_results[0]))
-
- xpath_expr = '/a:Envelope/a:Body/b:response/searchResult'
- search_results = soap_doc.xpath(xpath_expr,
- namespaces=_XPATH_NAMESPACES)
-
- result_list = []
- for result in search_results:
- result_items = result.getchildren()
- item_list = parse_result_items(result_items)
- for item in item_list:
- ident1 = item[0]
- ident2 = item[1]
- meta = item[2]
- idents = {}
- ident1_imid = ident1.attrib['name']
- ident1_type = get_type_from_ifmap_id(ident1_imid)
- idents[ident1_type] = get_fq_name_str_from_ifmap_id(
- ident1_imid)
- if ident2 is not None:
- ident2_imid = ident2.attrib['name']
- ident2_type = get_type_from_ifmap_id(ident2_imid)
- if ident1_type == ident2_type:
- idents[ident1_type] = [
- idents[ident1_type],
- get_fq_name_str_from_ifmap_id(ident2_imid)]
- else:
- idents[ident2_type] = get_fq_name_str_from_ifmap_id(
- ident2_imid)
- result_list.append((idents, meta))
- return result_list
-# end parse_search_result
-
-def ifmap_read(mapclient, ifmap_id, srch_meta, result_meta, field_names=None):
- start_id = str(
- Identity(name=ifmap_id, type='other', other_type='extended'))
-
- def _search(start_id, match_meta=None, result_meta=None,
- max_depth=1):
- # set ifmap search parameters
- srch_params = {}
- srch_params['max-depth'] = str(max_depth)
- srch_params['max-size'] = '50000000'
-
- if match_meta is not None:
- srch_params['match-links'] = match_meta
-
- if result_meta is not None:
- # all => don't set result-filter, so server returns all id + meta
- if result_meta == "all":
- pass
- else:
- srch_params['result-filter'] = result_meta
- else:
- # default to return match_meta metadata types only
- srch_params['result-filter'] = match_meta
-
- srch_req = SearchRequest(mapclient.get_session_id(), start_id,
- search_parameters=srch_params
- )
- result = mapclient.call('search', srch_req)
-
- return result
- # end _search
-
- return _search(start_id, srch_meta, result_meta, max_depth=10)
-# end ifmap_read
-
-def ifmap_read_all(mapclient):
- srch_meta = None
- result_meta = 'all'
- return ifmap_read(mapclient, 'contrail:config-root:root',
- srch_meta, result_meta)
-# end ifmap_read_all
-
-def escape(string):
- return s_esc(string, entities={'"':'&quot;', "'": "&apos;"})
-# end escape
-
-def unescape(string):
- return s_unesc(string, entities={'&quot;':'"', "&apos;": "'"})
-# end unescape
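
A standalone sketch of the fq_name/ifmap-id helpers above, using an invented project-scoped virtual-network name ('default-domain', 'demo', 'vn-blue') purely for illustration.

from xml.sax.saxutils import escape as s_esc, unescape as s_unesc

def escape(string):
    return s_esc(string, entities={'"': '&quot;', "'": '&apos;'})

def unescape(string):
    return s_unesc(string, entities={'&quot;': '"', '&apos;': "'"})

def get_ifmap_id_from_fq_name(res_type, fq_name):
    # Mirrors the deleted helper; res_type avoids shadowing the type() builtin.
    return 'contrail:' + res_type + escape(':' + ':'.join(fq_name))

def get_fq_name_from_ifmap_id(ifmap_id):
    return unescape(ifmap_id).split(':')[2:]

imid = get_ifmap_id_from_fq_name('virtual-network',
                                 ['default-domain', 'demo', 'vn-blue'])
print(imid)                             # contrail:virtual-network:default-domain:demo:vn-blue
print(get_fq_name_from_ifmap_id(imid))  # ['default-domain', 'demo', 'vn-blue']
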
diff --git a/Testcases/cfgm_common/imid.pyc b/Testcases/cfgm_common/imid.pyc
deleted file mode 100644
index 8975407..0000000
--- a/Testcases/cfgm_common/imid.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/importutils.py b/Testcases/cfgm_common/importutils.py
deleted file mode 100644
index 0ae7ffe..0000000
--- a/Testcases/cfgm_common/importutils.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Import related utilities and helper functions.
-"""
-
-import sys
-import traceback
-
-
-def import_class(import_str):
- """Returns a class from a string including module and class."""
- mod_str, _sep, class_str = import_str.rpartition('.')
- __import__(mod_str)
- try:
- return getattr(sys.modules[mod_str], class_str)
- except AttributeError:
- raise ImportError('Class %s cannot be found (%s)' %
- (class_str,
- traceback.format_exception(*sys.exc_info())))
-
-
-def import_object(import_str, *args, **kwargs):
- """Import a class and return an instance of it."""
- return import_class(import_str)(*args, **kwargs)
-
-
-def import_object_ns(name_space, import_str, *args, **kwargs):
- """Tries to import object from default namespace.
-
- Imports a class and return an instance of it, first by trying
- to find the class in a default namespace, then falling back to
- a full path if not found in the default namespace.
- """
- import_value = "%s.%s" % (name_space, import_str)
- try:
- return import_class(import_value)(*args, **kwargs)
- except ImportError:
- return import_class(import_str)(*args, **kwargs)
-
-
-def import_module(import_str):
- """Import a module."""
- __import__(import_str)
- return sys.modules[import_str]
-
-
-def try_import(import_str, default=None):
- """Try to import a module and if it fails return default."""
- try:
- return import_module(import_str)
- except ImportError:
- return default
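
A short sketch of import_class()/try_import() above, exercised against standard-library names only, since none of the cfgm_common callers appear in this diff.

import sys

def import_class(import_str):
    # Trimmed copy of the helper above, without the detailed ImportError message.
    mod_str, _sep, class_str = import_str.rpartition('.')
    __import__(mod_str)
    return getattr(sys.modules[mod_str], class_str)

def try_import(import_str, default=None):
    try:
        __import__(import_str)
        return sys.modules[import_str]
    except ImportError:
        return default

print(import_class('collections.OrderedDict'))     # <class 'collections.OrderedDict'>
print(try_import('no_such_module', default=None))  # None
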
diff --git a/Testcases/cfgm_common/importutils.pyc b/Testcases/cfgm_common/importutils.pyc
deleted file mode 100644
index bbbcb95..0000000
--- a/Testcases/cfgm_common/importutils.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/rest.py b/Testcases/cfgm_common/rest.py
deleted file mode 100644
index 7287f8d..0000000
--- a/Testcases/cfgm_common/rest.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-OP_POST = 1
-OP_GET = 2
-OP_PUT = 3
-OP_DELETE = 4
-
-
-def hdr_client_tenant():
- return 'X-Tenant-Name'
-# end hdr_client_tenant
-
-# TODO transform from client value
-
-
-def hdr_server_tenant():
- return 'HTTP_X_TENANT_NAME'
-# end hdr_server_tenant
-
-
-class LinkObject(object):
-
- def __init__(self, rel, base_url, uri, name):
- self.rel = rel
- self.base_url = base_url
- self.uri = uri
- self.name = name
- # end __init__
-
- def to_dict(self, with_url=None):
- if not with_url:
- url = self.base_url
- else:
- url = with_url
- return {'rel': self.rel,
- 'href': url + self.uri,
- 'name': self.name}
- # end to_dict
-
-# end class LinkObject
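
For illustration, LinkObject.to_dict() rendered for an invented API-server link; the base URL and resource name are placeholders, not values taken from this repository.

class LinkObject(object):
    # Same fields and to_dict() shape as the deleted class above.
    def __init__(self, rel, base_url, uri, name):
        self.rel = rel
        self.base_url = base_url
        self.uri = uri
        self.name = name

    def to_dict(self, with_url=None):
        url = with_url if with_url else self.base_url
        return {'rel': self.rel, 'href': url + self.uri, 'name': self.name}

link = LinkObject('collection', 'http://127.0.0.1:8082', '/virtual-networks',
                  'virtual-network')
print(link.to_dict())
# {'rel': 'collection', 'href': 'http://127.0.0.1:8082/virtual-networks', 'name': 'virtual-network'}
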
diff --git a/Testcases/cfgm_common/rest.pyc b/Testcases/cfgm_common/rest.pyc
deleted file mode 100644
index 33218d3..0000000
--- a/Testcases/cfgm_common/rest.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/svc_info.py b/Testcases/cfgm_common/svc_info.py
deleted file mode 100644
index 9f9eba9..0000000
--- a/Testcases/cfgm_common/svc_info.py
+++ /dev/null
@@ -1,99 +0,0 @@
-#
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-_MGMT_STR = "management"
-_LEFT_STR = "left"
-_RIGHT_STR = "right"
-
-_SVC_VN_MGMT = "svc-vn-mgmt"
-_SVC_VN_LEFT = "svc-vn-left"
-_SVC_VN_RIGHT = "svc-vn-right"
-_VN_MGMT_SUBNET_CIDR = '10.250.1.0/24'
-_VN_LEFT_SUBNET_CIDR = '10.250.2.0/24'
-_VN_RIGHT_SUBNET_CIDR = '10.250.3.0/24'
-
-_VN_SNAT_PREFIX_NAME = 'snat-si-left'
-_VN_SNAT_SUBNET_CIDR = '100.64.0.0/29'
-
-_CHECK_SVC_VM_HEALTH_INTERVAL = 30
-
-_VM_INSTANCE_TYPE = 'virtual-machine'
-_NETNS_INSTANCE_TYPE = 'network-namespace'
-
-_SNAT_SVC_TYPE = 'source-nat'
-_LB_SVC_TYPE = 'loadbalancer'
-
-_ACTIVE_LOCAL_PREFERENCE = 200
-_STANDBY_LOCAL_PREFERENCE = 100
-
-# Minimum vrouter agent version that can manage service instances
-_VROUTER_NETNS_SUPPORTED_VERSION = '1.10'
-
-def get_management_if_str():
- return _MGMT_STR
-
-def get_left_if_str():
- return _LEFT_STR
-
-def get_right_if_str():
- return _RIGHT_STR
-
-def get_if_str_list():
- if_str_list = []
- if_str_list.append(get_management_if_str())
- if_str_list.append(get_left_if_str())
- if_str_list.append(get_right_if_str())
- return if_str_list
-
-def get_management_vn_name():
- return _SVC_VN_MGMT
-
-def get_left_vn_name():
- return _SVC_VN_LEFT
-
-def get_right_vn_name():
- return _SVC_VN_RIGHT
-
-def get_shared_vn_list():
- shared_vn_list = []
- shared_vn_list.append(get_management_vn_name())
- shared_vn_list.append(get_left_vn_name())
- shared_vn_list.append(get_right_vn_name())
- return shared_vn_list
-
-def get_management_vn_subnet():
- return _VN_MGMT_SUBNET_CIDR
-
-def get_left_vn_subnet():
- return _VN_LEFT_SUBNET_CIDR
-
-def get_right_vn_subnet():
- return _VN_RIGHT_SUBNET_CIDR
-
-def get_snat_left_vn_prefix():
- return _VN_SNAT_PREFIX_NAME
-
-def get_snat_left_subnet():
- return _VN_SNAT_SUBNET_CIDR
-
-def get_vm_instance_type():
- return _VM_INSTANCE_TYPE
-
-def get_netns_instance_type():
- return _NETNS_INSTANCE_TYPE
-
-def get_snat_service_type():
- return _SNAT_SVC_TYPE
-
-def get_lb_service_type():
- return _LB_SVC_TYPE
-
-def get_vm_health_interval():
- return _CHECK_SVC_VM_HEALTH_INTERVAL
-
-def get_active_preference():
- return _ACTIVE_LOCAL_PREFERENCE
-
-def get_standby_preference():
- return _STANDBY_LOCAL_PREFERENCE
diff --git a/Testcases/cfgm_common/svc_info.pyc b/Testcases/cfgm_common/svc_info.pyc
deleted file mode 100644
index e298b95..0000000
--- a/Testcases/cfgm_common/svc_info.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/utils.py b/Testcases/cfgm_common/utils.py
deleted file mode 100644
index a023912..0000000
--- a/Testcases/cfgm_common/utils.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2015 Juniper Networks
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# @author: Numan Siddique, eNovance.
-
-
-import urllib
-from collections import OrderedDict
-import sys
-import cgitb
-import cStringIO
-
-def detailed_traceback():
- buf = cStringIO.StringIO()
- cgitb.Hook(format="text", file=buf).handle(sys.exc_info())
- tb_txt = buf.getvalue()
- buf.close()
- return tb_txt
-# end detailed_traceback
-
-def encode_string(enc_str, encoding='utf-8'):
- """Encode the string using urllib.quote_plus
-
- Eg. @input:
- enc_str = 'netéù'
- type - 'unicode' or 'str'
- @retval
- enc_str = 'net%C3%A9%C3%B9'
- type - str
- """
- try:
- enc_str.encode()
- except (UnicodeDecodeError, UnicodeEncodeError):
- if type(enc_str) is unicode:
- enc_str = enc_str.encode(encoding)
- enc_str = urllib.quote_plus(enc_str)
- except Exception:
- pass
- return enc_str
-
-
-def decode_string(dec_str, encoding='utf-8'):
- """Decode the string previously encoded using urllib.quote_plus.
-
- Eg. If dec_str = 'net%C3%A9%C3%B9'
- type - 'unicode' or 'str'
- @retval
- ret_dec_str = 'netéù'
- type - unicode
- """
- ret_dec_str = dec_str
- try:
- if type(ret_dec_str) is unicode:
- ret_dec_str = str(ret_dec_str)
- ret_dec_str = urllib.unquote_plus(ret_dec_str)
- return ret_dec_str.decode(encoding)
- except Exception:
- return dec_str
-
-
-class CacheContainer(object):
- def __init__(self, size):
- self.container_size = size
- self.dictionary = OrderedDict()
-
- def __getitem__(self, key, default=None):
- value = self.dictionary[key]
- # item accessed - reinsert it as the most recently used entry
- del self.dictionary[key]
- self.dictionary[key] = value
-
- return value
-
- def __setitem__(self, key, value):
- self.dictionary[key] = value
- if len(self.dictionary.keys()) > self.container_size:
- # container is full, drop the least recently used item
- self.dictionary.popitem(last=False)
-
- def __contains__(self, key):
- return key in self.dictionary
-
- def __repr__(self):
- return str(self.dictionary)
-
-
-def CamelCase(input):
- words = input.replace('_', '-').split('-')
- name = ''
- for w in words:
- name += w.capitalize()
- return name
-#end CamelCase
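
A small standalone sketch of the CacheContainer LRU behaviour above, using a three-entry cache and throwaway keys; only the methods needed for the demonstration are copied.

from collections import OrderedDict

class CacheContainer(object):
    def __init__(self, size):
        self.container_size = size
        self.dictionary = OrderedDict()

    def __getitem__(self, key):
        value = self.dictionary[key]
        del self.dictionary[key]
        self.dictionary[key] = value    # re-insert as most recently used
        return value

    def __setitem__(self, key, value):
        self.dictionary[key] = value
        if len(self.dictionary) > self.container_size:
            self.dictionary.popitem(last=False)    # evict the least recently used

    def __contains__(self, key):
        return key in self.dictionary

cache = CacheContainer(3)
for k in ('a', 'b', 'c'):
    cache[k] = k.upper()
cache['a']           # touch 'a' so 'b' becomes the oldest entry
cache['d'] = 'D'     # overflows the cache and evicts 'b'
print('b' in cache)  # False
print('a' in cache)  # True
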
diff --git a/Testcases/cfgm_common/utils.pyc b/Testcases/cfgm_common/utils.pyc
deleted file mode 100644
index fdc525e..0000000
--- a/Testcases/cfgm_common/utils.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/__init__.py b/Testcases/cfgm_common/uve/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/Testcases/cfgm_common/uve/__init__.py
+++ /dev/null
diff --git a/Testcases/cfgm_common/uve/__init__.pyc b/Testcases/cfgm_common/uve/__init__.pyc
deleted file mode 100644
index 1c66ce2..0000000
--- a/Testcases/cfgm_common/uve/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/acl/__init__.py b/Testcases/cfgm_common/uve/acl/__init__.py
deleted file mode 100644
index adefd8e..0000000
--- a/Testcases/cfgm_common/uve/acl/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__all__ = ['ttypes', 'constants']
diff --git a/Testcases/cfgm_common/uve/acl/__init__.pyc b/Testcases/cfgm_common/uve/acl/__init__.pyc
deleted file mode 100644
index dedeeb1..0000000
--- a/Testcases/cfgm_common/uve/acl/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/acl/acl.html b/Testcases/cfgm_common/uve/acl/acl.html
deleted file mode 100644
index b942438..0000000
--- a/Testcases/cfgm_common/uve/acl/acl.html
+++ /dev/null
@@ -1,14 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml">
-<head>
-<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>Module: acl</title></head><body>
-<h1>Module: acl</h1>
-<table><tr><th>Module</th><th>Sandeshs</th></tr>
-<tr>
-<td>acl</td><td></td></tr>
-</table>
-<hr/><h2 id="Sandeshs"></h2>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/acl/acl.xml b/Testcases/cfgm_common/uve/acl/acl.xml
deleted file mode 100644
index 8fbac67..0000000
--- a/Testcases/cfgm_common/uve/acl/acl.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<?xml-stylesheet type="text/xsl" href="/universal_parse.xsl"?>
-<acl type="rlist">
-</acl>
diff --git a/Testcases/cfgm_common/uve/acl/constants.py b/Testcases/cfgm_common/uve/acl/constants.py
deleted file mode 100644
index aadd78e..0000000
--- a/Testcases/cfgm_common/uve/acl/constants.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-from ttypes import *
-
diff --git a/Testcases/cfgm_common/uve/acl/constants.pyc b/Testcases/cfgm_common/uve/acl/constants.pyc
deleted file mode 100644
index 4ee591b..0000000
--- a/Testcases/cfgm_common/uve/acl/constants.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/acl/http_request.py b/Testcases/cfgm_common/uve/acl/http_request.py
deleted file mode 100644
index 8baea4f..0000000
--- a/Testcases/cfgm_common/uve/acl/http_request.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-import ttypes
-
-_HTTP_REQUEST_LIST = [
-]
-
diff --git a/Testcases/cfgm_common/uve/acl/http_request.pyc b/Testcases/cfgm_common/uve/acl/http_request.pyc
deleted file mode 100644
index 73a314c..0000000
--- a/Testcases/cfgm_common/uve/acl/http_request.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/acl/index.html b/Testcases/cfgm_common/uve/acl/index.html
deleted file mode 100644
index 0f31e1b..0000000
--- a/Testcases/cfgm_common/uve/acl/index.html
+++ /dev/null
@@ -1,9 +0,0 @@
-<html><head>
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>All Thrift declarations</title></head><body>
-<h1>All Thrift declarations</h1>
-<table><tr><th>Module</th><th>Services</th><th>Sandeshs</th><th>Data types</th><th>Constants</th></tr>
-<tr>
-<td>acl</td><td></td></tr>
-</table>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/acl/request_skeleton.py b/Testcases/cfgm_common/uve/acl/request_skeleton.py
deleted file mode 100644
index 99c1196..0000000
--- a/Testcases/cfgm_common/uve/acl/request_skeleton.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-
-# This autogenerated skeleton file illustrates the implementation of
-# derived class to handle the sandesh request.
-
diff --git a/Testcases/cfgm_common/uve/acl/request_skeleton.pyc b/Testcases/cfgm_common/uve/acl/request_skeleton.pyc
deleted file mode 100644
index 7fbba89..0000000
--- a/Testcases/cfgm_common/uve/acl/request_skeleton.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/acl/style.css b/Testcases/cfgm_common/uve/acl/style.css
deleted file mode 100644
index 6dc2f22..0000000
--- a/Testcases/cfgm_common/uve/acl/style.css
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Auto-generated CSS for generated Thrift docs */
-body { font-family: Tahoma, sans-serif; }
-pre { background-color: #dddddd; padding: 6px; }
-h3,h4 { padding-top: 0px; margin-top: 0px; }
-div.definition { border: 1px solid gray; margin: 10px; padding: 10px; }
-div.extends { margin: -0.5em 0 1em 5em }
-table { border: 1px solid grey; border-collapse: collapse; }
-td { border: 1px solid grey; padding: 1px 6px; vertical-align: top; }
-th { border: 1px solid black; background-color: #bbbbbb;
- text-align: left; padding: 1px 6px; }
diff --git a/Testcases/cfgm_common/uve/acl/ttypes.py b/Testcases/cfgm_common/uve/acl/ttypes.py
deleted file mode 100644
index 6575253..0000000
--- a/Testcases/cfgm_common/uve/acl/ttypes.py
+++ /dev/null
@@ -1,837 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-
-from pysandesh.transport import TTransport
-from pysandesh.protocol import TBinaryProtocol, TProtocol
-try:
- from pysandesh.protocol import fastbinary
-except:
- fastbinary = None
-
-import cStringIO
-import uuid
-import bottle
-from pysandesh import sandesh_base
-from pysandesh.sandesh_http import SandeshHttp
-from pysandesh.sandesh_uve import SandeshUVETypeMaps
-from pysandesh.util import UTCTimestampUsec, UTCTimestampUsecToString
-from pysandesh.gen_py.sandesh.constants import *
-
-
-
-class AclRuleToVnPolicyRule(object):
- """
- Attributes:
- - acl_major
- - acl_minor
- - policy_or_group_name
- - policy_major
- - policy_minor
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.I32, 'acl_major', None, None, ), # 1
- (2, TType.I32, 'acl_minor', None, None, ), # 2
- (3, TType.STRING, 'policy_or_group_name', None, None, ), # 3
- (4, TType.I32, 'policy_major', None, None, ), # 4
- (5, TType.I32, 'policy_minor', None, None, ), # 5
- )
-
- def __init__(self, acl_major=None, acl_minor=None, policy_or_group_name=None, policy_major=None, policy_minor=None,):
- self.acl_major = acl_major
- self.acl_minor = acl_minor
- self.policy_or_group_name = policy_or_group_name
- self.policy_major = policy_major
- self.policy_minor = policy_minor
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.I32:
- (length, self.acl_major) = iprot.readI32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.I32:
- (length, self.acl_minor) = iprot.readI32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.STRING:
- (length, self.policy_or_group_name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.I32:
- (length, self.policy_major) = iprot.readI32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 5:
- if ftype == TType.I32:
- (length, self.policy_minor) = iprot.readI32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('AclRuleToVnPolicyRule') < 0: return -1
- if self.acl_major is not None:
- annotations = {}
- if oprot.writeFieldBegin('acl_major', TType.I32, 1, annotations) < 0: return -1
- if oprot.writeI32(self.acl_major) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.acl_minor is not None:
- annotations = {}
- if oprot.writeFieldBegin('acl_minor', TType.I32, 2, annotations) < 0: return -1
- if oprot.writeI32(self.acl_minor) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.policy_or_group_name is not None:
- annotations = {}
- if oprot.writeFieldBegin('policy_or_group_name', TType.STRING, 3, annotations) < 0: return -1
- if oprot.writeString(self.policy_or_group_name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.policy_major is not None:
- annotations = {}
- if oprot.writeFieldBegin('policy_major', TType.I32, 4, annotations) < 0: return -1
- if oprot.writeI32(self.policy_major) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.policy_minor is not None:
- annotations = {}
- if oprot.writeFieldBegin('policy_minor', TType.I32, 5, annotations) < 0: return -1
- if oprot.writeI32(self.policy_minor) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.acl_major is not None:
- log_str.write('acl_major = ')
- log_str.write(str(self.acl_major))
- log_str.write(' ')
- if self.acl_minor is not None:
- log_str.write('acl_minor = ')
- log_str.write(str(self.acl_minor))
- log_str.write(' ')
- if self.policy_or_group_name is not None:
- log_str.write('policy_or_group_name = ')
- log_str.write(self.policy_or_group_name)
- log_str.write(' ')
- if self.policy_major is not None:
- log_str.write('policy_major = ')
- log_str.write(str(self.policy_major))
- log_str.write(' ')
- if self.policy_minor is not None:
- log_str.write('policy_minor = ')
- log_str.write(str(self.policy_minor))
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class UveAclConfig(object):
- """
- Attributes:
- - virtual_network
- - attached_policies
- - acl_rule_to_policy
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'virtual_network', None, None, ), # 1
- (2, TType.LIST, 'attached_policies', (TType.STRING,None), None, ), # 2
- (3, TType.LIST, 'acl_rule_to_policy', (TType.STRUCT,(AclRuleToVnPolicyRule, AclRuleToVnPolicyRule.thrift_spec)), None, ), # 3
- )
-
- def __init__(self, virtual_network=None, attached_policies=None, acl_rule_to_policy=None,):
- self.virtual_network = virtual_network
- self.attached_policies = attached_policies
- self.acl_rule_to_policy = acl_rule_to_policy
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.virtual_network) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.LIST:
- self.attached_policies = []
- (length, _etype3, _size0) = iprot.readListBegin()
- read_cnt += length
- for _i4 in xrange(_size0):
- read_cnt += iprot.readContainerElementBegin()
- (length, _elem5) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- self.attached_policies.append(_elem5)
- read_cnt += iprot.readContainerElementEnd()
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.LIST:
- self.acl_rule_to_policy = []
- (length, _etype9, _size6) = iprot.readListBegin()
- read_cnt += length
- for _i10 in xrange(_size6):
- _elem11 = AclRuleToVnPolicyRule()
- read_cnt += _elem11.read(iprot)
- self.acl_rule_to_policy.append(_elem11)
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('UveAclConfig') < 0: return -1
- if self.virtual_network is not None:
- annotations = {}
- if oprot.writeFieldBegin('virtual_network', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.virtual_network) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.attached_policies is not None:
- annotations = {}
- if oprot.writeFieldBegin('attached_policies', TType.LIST, 2, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRING, len(self.attached_policies)) < 0: return -1
- for iter12 in self.attached_policies:
- if oprot.writeContainerElementBegin() < 0: return -1
- if oprot.writeString(iter12) < 0: return -1
- if oprot.writeContainerElementEnd() < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.acl_rule_to_policy is not None:
- annotations = {}
- if oprot.writeFieldBegin('acl_rule_to_policy', TType.LIST, 3, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRUCT, len(self.acl_rule_to_policy)) < 0: return -1
- for iter13 in self.acl_rule_to_policy:
- if iter13.write(oprot) < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.virtual_network is not None:
- log_str.write('virtual_network = ')
- log_str.write(self.virtual_network)
- log_str.write(' ')
- if self.attached_policies is not None:
- log_str.write('attached_policies = ')
- log_str.write('[ ')
- for iter14 in self.attached_policies:
- log_str.write(iter14)
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- if self.acl_rule_to_policy is not None:
- log_str.write('acl_rule_to_policy = ')
- log_str.write('[ ')
- for iter15 in self.acl_rule_to_policy:
- log_str.write('<< ')
- log_str.write(iter15.log())
- log_str.write('>>')
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class UveAclVirtualNetworkConfig(object):
- """
- Attributes:
- - name
- - deleted
- - config
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'name', None, None, ), # 1
- (2, TType.BOOL, 'deleted', None, None, ), # 2
- (3, TType.STRUCT, 'config', (UveAclConfig, UveAclConfig.thrift_spec), None, ), # 3
- )
-
- def __init__(self, name=None, deleted=None, config=None,):
- self.name = name
- self.deleted = deleted
- self.config = config
- self._table = 'ObjectVNTable'
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.BOOL:
- (length, self.deleted) = iprot.readBool();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.STRUCT:
- self.config = UveAclConfig()
- read_cnt += self.config.read(iprot)
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('UveAclVirtualNetworkConfig') < 0: return -1
- if self.name is not None:
- annotations = {}
- if self._table is None or self._table is '': return -1
- annotations['key'] = self._table
- if oprot.writeFieldBegin('name', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.deleted is not None:
- annotations = {}
- if oprot.writeFieldBegin('deleted', TType.BOOL, 2, annotations) < 0: return -1
- if oprot.writeBool(self.deleted) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.config is not None:
- annotations = {}
- if oprot.writeFieldBegin('config', TType.STRUCT, 3, annotations) < 0: return -1
- if self.config.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.name is not None:
- log_str.write('name = ')
- log_str.write(self.name)
- log_str.write(' ')
- if self.deleted is not None:
- log_str.write('deleted = ')
- if self.deleted:
- log_str.write('True')
- else:
- log_str.write('False')
- log_str.write(' ')
- if self.config is not None:
- log_str.write('config = ')
- log_str.write('<< ')
- log_str.write(self.config.log())
- log_str.write('>>')
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class UveAclVirtualMachineConfig(object):
- """
- Attributes:
- - name
- - deleted
- - config
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'name', None, None, ), # 1
- (2, TType.BOOL, 'deleted', None, None, ), # 2
- (3, TType.STRUCT, 'config', (UveAclConfig, UveAclConfig.thrift_spec), None, ), # 3
- )
-
- def __init__(self, name=None, deleted=None, config=None,):
- self.name = name
- self.deleted = deleted
- self.config = config
- self._table = 'ObjectVMTable'
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.BOOL:
- (length, self.deleted) = iprot.readBool();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.STRUCT:
- self.config = UveAclConfig()
- read_cnt += self.config.read(iprot)
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('UveAclVirtualMachineConfig') < 0: return -1
- if self.name is not None:
- annotations = {}
- if self._table is None or self._table is '': return -1
- annotations['key'] = self._table
- if oprot.writeFieldBegin('name', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.deleted is not None:
- annotations = {}
- if oprot.writeFieldBegin('deleted', TType.BOOL, 2, annotations) < 0: return -1
- if oprot.writeBool(self.deleted) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.config is not None:
- annotations = {}
- if oprot.writeFieldBegin('config', TType.STRUCT, 3, annotations) < 0: return -1
- if self.config.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.name is not None:
- log_str.write('name = ')
- log_str.write(self.name)
- log_str.write(' ')
- if self.deleted is not None:
- log_str.write('deleted = ')
- if self.deleted:
- log_str.write('True')
- else:
- log_str.write('False')
- log_str.write(' ')
- if self.config is not None:
- log_str.write('config = ')
- log_str.write('<< ')
- log_str.write(self.config.log())
- log_str.write('>>')
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class UveAclVirtualNetworkConfigTrace(sandesh_base.SandeshUVE):
-
- thrift_spec = (
- None, # 0
- (1, TType.STRUCT, 'data', (UveAclVirtualNetworkConfig, UveAclVirtualNetworkConfig.thrift_spec), None, ), # 1
- )
-
- def __init__(self, data=None, table=None, sandesh=sandesh_base.sandesh_global):
- sandesh_base.SandeshUVE.__init__(self)
- self.data = data
- self._scope = sandesh.scope()
- self._module = sandesh.module()
- self._source = sandesh.source_id()
- self._node_type = sandesh.node_type()
- self._instance_id = sandesh.instance_id()
- self._seqnum = 0
- self._timestamp = UTCTimestampUsec()
- self._versionsig = 1124326490
- self._hints = 0 | SANDESH_KEY_HINT
- if table is not None:
- self.data._table = table
-
- def update_uve(self, tdata):
- if self.data.name is not None:
- tdata.name = self.data.name
- if self.data.deleted is not None:
- tdata.deleted = self.data.deleted
- if self.data.config is not None:
- tdata.config = self.data.config
- return tdata
-
- def log(self, trace=False):
- log_str = cStringIO.StringIO()
- if trace:
- log_str.write(str(self._timestamp))
- log_str.write(' ')
- log_str.write('UveAclVirtualNetworkConfigTrace: ')
- if self.data is not None:
- log_str.write('data = ')
- log_str.write('<< ')
- log_str.write(self.data.log())
- log_str.write('>>')
- log_str.write(' ')
- return log_str.getvalue()
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return -1
- read_cnt = 0
- (length, sandesh_name) = iprot.readSandeshBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRUCT:
- self.data = UveAclVirtualNetworkConfig()
- read_cnt += self.data.read(iprot)
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readSandeshEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeSandeshBegin('UveAclVirtualNetworkConfigTrace') < 0: return -1
- if self.data is not None:
- annotations = {}
- if oprot.writeFieldBegin('data', TType.STRUCT, 1, annotations) < 0: return -1
- if self.data.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeSandeshEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def compare(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.data != other.data:
- return False
- return True
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class UveAclVirtualMachineConfigTrace(sandesh_base.SandeshUVE):
-
- thrift_spec = (
- None, # 0
- (1, TType.STRUCT, 'data', (UveAclVirtualMachineConfig, UveAclVirtualMachineConfig.thrift_spec), None, ), # 1
- )
-
- def __init__(self, data=None, table=None, sandesh=sandesh_base.sandesh_global):
- sandesh_base.SandeshUVE.__init__(self)
- self.data = data
- self._scope = sandesh.scope()
- self._module = sandesh.module()
- self._source = sandesh.source_id()
- self._node_type = sandesh.node_type()
- self._instance_id = sandesh.instance_id()
- self._seqnum = 0
- self._timestamp = UTCTimestampUsec()
- self._versionsig = 1124326490
- self._hints = 0 | SANDESH_KEY_HINT
- if table is not None:
- self.data._table = table
-
- def update_uve(self, tdata):
- if self.data.name is not None:
- tdata.name = self.data.name
- if self.data.deleted is not None:
- tdata.deleted = self.data.deleted
- if self.data.config is not None:
- tdata.config = self.data.config
- return tdata
-
- def log(self, trace=False):
- log_str = cStringIO.StringIO()
- if trace:
- log_str.write(str(self._timestamp))
- log_str.write(' ')
- log_str.write('UveAclVirtualMachineConfigTrace: ')
- if self.data is not None:
- log_str.write('data = ')
- log_str.write('<< ')
- log_str.write(self.data.log())
- log_str.write('>>')
- log_str.write(' ')
- return log_str.getvalue()
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return -1
- read_cnt = 0
- (length, sandesh_name) = iprot.readSandeshBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRUCT:
- self.data = UveAclVirtualMachineConfig()
- read_cnt += self.data.read(iprot)
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readSandeshEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeSandeshBegin('UveAclVirtualMachineConfigTrace') < 0: return -1
- if self.data is not None:
- annotations = {}
- if oprot.writeFieldBegin('data', TType.STRUCT, 1, annotations) < 0: return -1
- if self.data.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeSandeshEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def compare(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.data != other.data:
- return False
- return True
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-
-_SANDESH_REQUEST_LIST = [
-]
-
-
-_SANDESH_UVE_LIST = [
-'UveAclVirtualNetworkConfigTrace',
-'UveAclVirtualMachineConfigTrace',
-]
-
-
-_SANDESH_UVE_DATA_LIST = [
-'UveAclVirtualNetworkConfig',
-'UveAclVirtualMachineConfig',
-]
-
-
-_SANDESH_ALARM_LIST = [
-]
-
-
-_SANDESH_ALARM_DATA_LIST = [
-]
diff --git a/Testcases/cfgm_common/uve/acl/ttypes.pyc b/Testcases/cfgm_common/uve/acl/ttypes.pyc
deleted file mode 100644
index eafdd71..0000000
--- a/Testcases/cfgm_common/uve/acl/ttypes.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/__init__.py b/Testcases/cfgm_common/uve/cfgm_cpuinfo/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/__init__.py
+++ /dev/null
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/__init__.pyc b/Testcases/cfgm_common/uve/cfgm_cpuinfo/__init__.pyc
deleted file mode 100644
index 8d74650..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cfgm_cpuinfo.html b/Testcases/cfgm_common/uve/cfgm_cpuinfo/cfgm_cpuinfo.html
deleted file mode 100644
index 166887a..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cfgm_cpuinfo.html
+++ /dev/null
@@ -1,14 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml">
-<head>
-<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>Module: cfgm_cpuinfo</title></head><body>
-<h1>Module: cfgm_cpuinfo</h1>
-<table><tr><th>Module</th><th>Sandeshs</th></tr>
-<tr>
-<td>cfgm_cpuinfo</td><td></td></tr>
-</table>
-<hr/><h2 id="Sandeshs"></h2>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cfgm_cpuinfo.xml b/Testcases/cfgm_common/uve/cfgm_cpuinfo/cfgm_cpuinfo.xml
deleted file mode 100644
index da831a3..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cfgm_cpuinfo.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<?xml-stylesheet type="text/xsl" href="/universal_parse.xsl"?>
-<cfgm_cpuinfo type="rlist">
-</cfgm_cpuinfo>
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/constants.py b/Testcases/cfgm_common/uve/cfgm_cpuinfo/constants.py
deleted file mode 100644
index aadd78e..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/constants.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-from ttypes import *
-
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/constants.pyc b/Testcases/cfgm_common/uve/cfgm_cpuinfo/constants.pyc
deleted file mode 100644
index 9d1a5e5..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/constants.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/__init__.py b/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/__init__.py
deleted file mode 100644
index adefd8e..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__all__ = ['ttypes', 'constants']
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/__init__.pyc b/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/__init__.pyc
deleted file mode 100644
index 1446e7d..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/constants.py b/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/constants.py
deleted file mode 100644
index aadd78e..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/constants.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-from ttypes import *
-
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/constants.pyc b/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/constants.pyc
deleted file mode 100644
index 453ec8d..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/constants.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/cpuinfo.html b/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/cpuinfo.html
deleted file mode 100644
index f2fb672..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/cpuinfo.html
+++ /dev/null
@@ -1,18 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml">
-<head>
-<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>Module: cpuinfo</title></head><body>
-<h1>Module: cpuinfo</h1>
-<table><tr><th>Module</th><th>Sandeshs</th></tr>
-<tr>
-<td>cpuinfo</td><td><a href="cpuinfo.html#Snh_CpuLoadInfoReq">CpuLoadInfoReq</a><br/>
-</td></tr>
-</table>
-<hr/><h2 id="Sandeshs"></h2>
-<div class="definition"><h3 id="Snh_CpuLoadInfoReq">CpuLoadInfoReq</h3>
-<form action="Snh_CpuLoadInfoReq" method="get">
-<table><tr><th>Key</th><th>Field</th><th>Type</th><th>Description</th><th>Requiredness</th><th>Value</th></tr>
-</table><button type="submit">Send</button></form><br/></div></body></html>
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/cpuinfo.xml b/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/cpuinfo.xml
deleted file mode 100644
index d3a129b..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/cpuinfo.xml
+++ /dev/null
@@ -1,5 +0,0 @@
-<?xml-stylesheet type="text/xsl" href="/universal_parse.xsl"?>
-<cpuinfo type="rlist">
-<CpuLoadInfoReq type="sandesh">
-</CpuLoadInfoReq>
-</cpuinfo>
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/http_request.py b/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/http_request.py
deleted file mode 100644
index e48bba8..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/http_request.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-import ttypes
-
-_HTTP_REQUEST_LIST = [
-{ 'uri':'/Snh_CpuLoadInfoReq', 'method':ttypes.CpuLoadInfoReq.handle_http_request },
-]
-
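
The deleted http_request.py registers a single Sandesh HTTP route, mapping the URI '/Snh_CpuLoadInfoReq' to the static handler CpuLoadInfoReq.handle_http_request. A minimal sketch of consulting such a dispatch table; the find_handler helper and the lambda stand-in for the real handler are illustrative, not part of the generated module:

    # Illustrative only: a table in the same shape as _HTTP_REQUEST_LIST,
    # mapping a request URI to the callable that serves it.
    _HTTP_REQUEST_LIST = [
        {'uri': '/Snh_CpuLoadInfoReq', 'method': lambda: 'CpuLoadInfoResp'},
    ]

    def find_handler(uri, request_list):
        # Return the handler registered for this URI, or None if unknown.
        for entry in request_list:
            if entry['uri'] == uri:
                return entry['method']
        return None

    handler = find_handler('/Snh_CpuLoadInfoReq', _HTTP_REQUEST_LIST)
    if handler is not None:
        print(handler())  # -> 'CpuLoadInfoResp'
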
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/http_request.pyc b/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/http_request.pyc
deleted file mode 100644
index c013398..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/http_request.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/index.html b/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/index.html
deleted file mode 100644
index a338214..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/index.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<html><head>
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>All Thrift declarations</title></head><body>
-<h1>All Thrift declarations</h1>
-<table><tr><th>Module</th><th>Services</th><th>Sandeshs</th><th>Data types</th><th>Constants</th></tr>
-<tr>
-<td>cpuinfo</td><td><a href="cpuinfo.html#Snh_CpuLoadInfoReq">CpuLoadInfoReq</a><br/>
-</td></tr>
-</table>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/request_skeleton.py b/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/request_skeleton.py
deleted file mode 100644
index 22463de..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/request_skeleton.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-
-# This autogenerated skeleton file illustrates the implementation of
-# derived class to handle the sandesh request.
-
-
-# Create a derived class from "CpuLoadInfoReq" to handle
-# the sandesh request. Add this derived class "CpuLoadInfoReq_derived"
-# in module CpuLoadInfoReq_derived.py and add it in your package
-
-class CpuLoadInfoReq_derived(CpuLoadInfoReq):
-
-
- def handle_request(self):
- # Add your code to handle the "CpuLoadInfoReq" request
- pass
-
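
The skeleton above describes the intended usage: derive from CpuLoadInfoReq and implement handle_request. A sketch of what such a derived class might fill in, assuming the deleted cpuinfo ttypes module (and pysandesh) were still importable; how the response is dispatched back to the client is not shown in this diff, so that step is left as a comment:

    # Sketch only: a derived request handler in the pattern the skeleton describes.
    from cfgm_common.uve.cfgm_cpuinfo.cpuinfo.ttypes import (
        CpuLoadInfoReq, CpuLoadInfoResp, CpuLoadInfo, CpuLoadAvg, MemInfo)

    class CpuLoadInfoReq_derived(CpuLoadInfoReq):

        def handle_request(self):
            # Build the payload using the constructors defined in ttypes.py.
            cpu_info = CpuLoadInfo(
                num_cpu=4,
                meminfo=MemInfo(virt=1024, peakvirt=2048, res=512),
                cpuload=CpuLoadAvg(one_min_avg=0.1, five_min_avg=0.2,
                                   fifteen_min_avg=0.3),
                cpu_share=1.5)
            resp = CpuLoadInfoResp(cpu_info=cpu_info, more=False)
            # Sending the response back over HTTP is handled by pysandesh;
            # the exact call is not shown in this diff.
            return resp
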
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/request_skeleton.pyc b/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/request_skeleton.pyc
deleted file mode 100644
index d8eb2c1..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/request_skeleton.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/style.css b/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/style.css
deleted file mode 100644
index 6dc2f22..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/style.css
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Auto-generated CSS for generated Thrift docs */
-body { font-family: Tahoma, sans-serif; }
-pre { background-color: #dddddd; padding: 6px; }
-h3,h4 { padding-top: 0px; margin-top: 0px; }
-div.definition { border: 1px solid gray; margin: 10px; padding: 10px; }
-div.extends { margin: -0.5em 0 1em 5em }
-table { border: 1px solid grey; border-collapse: collapse; }
-td { border: 1px solid grey; padding: 1px 6px; vertical-align: top; }
-th { border: 1px solid black; background-color: #bbbbbb;
- text-align: left; padding: 1px 6px; }
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/ttypes.py b/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/ttypes.py
deleted file mode 100644
index fa6ed17..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/ttypes.py
+++ /dev/null
@@ -1,960 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-
-from pysandesh.transport import TTransport
-from pysandesh.protocol import TBinaryProtocol, TProtocol
-try:
- from pysandesh.protocol import fastbinary
-except:
- fastbinary = None
-
-import cStringIO
-import uuid
-import bottle
-from pysandesh import sandesh_base
-from pysandesh.sandesh_http import SandeshHttp
-from pysandesh.sandesh_uve import SandeshUVETypeMaps
-from pysandesh.util import UTCTimestampUsec, UTCTimestampUsecToString
-from pysandesh.gen_py.sandesh.constants import *
-
-
-
-class CpuLoadAvg(object):
- """
- Attributes:
- - one_min_avg
- - five_min_avg
- - fifteen_min_avg
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.DOUBLE, 'one_min_avg', None, None, ), # 1
- (2, TType.DOUBLE, 'five_min_avg', None, None, ), # 2
- (3, TType.DOUBLE, 'fifteen_min_avg', None, None, ), # 3
- )
-
- def __init__(self, one_min_avg=None, five_min_avg=None, fifteen_min_avg=None,):
- self.one_min_avg = one_min_avg
- self.five_min_avg = five_min_avg
- self.fifteen_min_avg = fifteen_min_avg
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.DOUBLE:
- (length, self.one_min_avg) = iprot.readDouble();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.DOUBLE:
- (length, self.five_min_avg) = iprot.readDouble();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.DOUBLE:
- (length, self.fifteen_min_avg) = iprot.readDouble();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('CpuLoadAvg') < 0: return -1
- if self.one_min_avg is not None:
- annotations = {}
- if oprot.writeFieldBegin('one_min_avg', TType.DOUBLE, 1, annotations) < 0: return -1
- if oprot.writeDouble(self.one_min_avg) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.five_min_avg is not None:
- annotations = {}
- if oprot.writeFieldBegin('five_min_avg', TType.DOUBLE, 2, annotations) < 0: return -1
- if oprot.writeDouble(self.five_min_avg) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.fifteen_min_avg is not None:
- annotations = {}
- if oprot.writeFieldBegin('fifteen_min_avg', TType.DOUBLE, 3, annotations) < 0: return -1
- if oprot.writeDouble(self.fifteen_min_avg) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.one_min_avg is not None:
- log_str.write('one_min_avg = ')
- log_str.write(str(self.one_min_avg))
- log_str.write(' ')
- if self.five_min_avg is not None:
- log_str.write('five_min_avg = ')
- log_str.write(str(self.five_min_avg))
- log_str.write(' ')
- if self.fifteen_min_avg is not None:
- log_str.write('fifteen_min_avg = ')
- log_str.write(str(self.fifteen_min_avg))
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class MemInfo(object):
- """
- Attributes:
- - virt
- - peakvirt
- - res
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.U32, 'virt', None, None, ), # 1
- (2, TType.U32, 'peakvirt', None, None, ), # 2
- (3, TType.U32, 'res', None, None, ), # 3
- )
-
- def __init__(self, virt=None, peakvirt=None, res=None,):
- self.virt = virt
- self.peakvirt = peakvirt
- self.res = res
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.U32:
- (length, self.virt) = iprot.readU32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.U32:
- (length, self.peakvirt) = iprot.readU32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.U32:
- (length, self.res) = iprot.readU32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('MemInfo') < 0: return -1
- if self.virt is not None:
- annotations = {}
- if oprot.writeFieldBegin('virt', TType.U32, 1, annotations) < 0: return -1
- if oprot.writeU32(self.virt) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.peakvirt is not None:
- annotations = {}
- if oprot.writeFieldBegin('peakvirt', TType.U32, 2, annotations) < 0: return -1
- if oprot.writeU32(self.peakvirt) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.res is not None:
- annotations = {}
- if oprot.writeFieldBegin('res', TType.U32, 3, annotations) < 0: return -1
- if oprot.writeU32(self.res) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.virt is not None:
- log_str.write('virt = ')
- log_str.write(str(self.virt))
- log_str.write(' ')
- if self.peakvirt is not None:
- log_str.write('peakvirt = ')
- log_str.write(str(self.peakvirt))
- log_str.write(' ')
- if self.res is not None:
- log_str.write('res = ')
- log_str.write(str(self.res))
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class SysMemInfo(object):
- """
- Attributes:
- - total
- - used
- - free
- - buffers
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.U32, 'total', None, None, ), # 1
- (2, TType.U32, 'used', None, None, ), # 2
- (3, TType.U32, 'free', None, None, ), # 3
- (4, TType.U32, 'buffers', None, None, ), # 4
- )
-
- def __init__(self, total=None, used=None, free=None, buffers=None,):
- self.total = total
- self.used = used
- self.free = free
- self.buffers = buffers
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.U32:
- (length, self.total) = iprot.readU32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.U32:
- (length, self.used) = iprot.readU32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.U32:
- (length, self.free) = iprot.readU32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.U32:
- (length, self.buffers) = iprot.readU32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('SysMemInfo') < 0: return -1
- if self.total is not None:
- annotations = {}
- if oprot.writeFieldBegin('total', TType.U32, 1, annotations) < 0: return -1
- if oprot.writeU32(self.total) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.used is not None:
- annotations = {}
- if oprot.writeFieldBegin('used', TType.U32, 2, annotations) < 0: return -1
- if oprot.writeU32(self.used) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.free is not None:
- annotations = {}
- if oprot.writeFieldBegin('free', TType.U32, 3, annotations) < 0: return -1
- if oprot.writeU32(self.free) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.buffers is not None:
- annotations = {}
- if oprot.writeFieldBegin('buffers', TType.U32, 4, annotations) < 0: return -1
- if oprot.writeU32(self.buffers) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.total is not None:
- log_str.write('total = ')
- log_str.write(str(self.total))
- log_str.write(' ')
- if self.used is not None:
- log_str.write('used = ')
- log_str.write(str(self.used))
- log_str.write(' ')
- if self.free is not None:
- log_str.write('free = ')
- log_str.write(str(self.free))
- log_str.write(' ')
- if self.buffers is not None:
- log_str.write('buffers = ')
- log_str.write(str(self.buffers))
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class CpuLoadInfo(object):
- """
- Attributes:
- - num_cpu
- - sys_mem_info
- - meminfo
- - cpuload
- - cpu_share
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.U32, 'num_cpu', None, None, ), # 1
- (2, TType.STRUCT, 'meminfo', (MemInfo, MemInfo.thrift_spec), None, ), # 2
- (3, TType.STRUCT, 'cpuload', (CpuLoadAvg, CpuLoadAvg.thrift_spec), None, ), # 3
- (4, TType.DOUBLE, 'cpu_share', None, None, ), # 4
- (5, TType.STRUCT, 'sys_mem_info', (SysMemInfo, SysMemInfo.thrift_spec), None, ), # 5
- )
-
- def __init__(self, num_cpu=None, sys_mem_info=None, meminfo=None, cpuload=None, cpu_share=None,):
- self.num_cpu = num_cpu
- self.sys_mem_info = sys_mem_info
- self.meminfo = meminfo
- self.cpuload = cpuload
- self.cpu_share = cpu_share
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.U32:
- (length, self.num_cpu) = iprot.readU32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 5:
- if ftype == TType.STRUCT:
- self.sys_mem_info = SysMemInfo()
- read_cnt += self.sys_mem_info.read(iprot)
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.STRUCT:
- self.meminfo = MemInfo()
- read_cnt += self.meminfo.read(iprot)
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.STRUCT:
- self.cpuload = CpuLoadAvg()
- read_cnt += self.cpuload.read(iprot)
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.DOUBLE:
- (length, self.cpu_share) = iprot.readDouble();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('CpuLoadInfo') < 0: return -1
- if self.num_cpu is not None:
- annotations = {}
- if oprot.writeFieldBegin('num_cpu', TType.U32, 1, annotations) < 0: return -1
- if oprot.writeU32(self.num_cpu) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.meminfo is not None:
- annotations = {}
- if oprot.writeFieldBegin('meminfo', TType.STRUCT, 2, annotations) < 0: return -1
- if self.meminfo.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.cpuload is not None:
- annotations = {}
- if oprot.writeFieldBegin('cpuload', TType.STRUCT, 3, annotations) < 0: return -1
- if self.cpuload.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.cpu_share is not None:
- annotations = {}
- if oprot.writeFieldBegin('cpu_share', TType.DOUBLE, 4, annotations) < 0: return -1
- if oprot.writeDouble(self.cpu_share) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.sys_mem_info is not None:
- annotations = {}
- if oprot.writeFieldBegin('sys_mem_info', TType.STRUCT, 5, annotations) < 0: return -1
- if self.sys_mem_info.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.num_cpu is not None:
- log_str.write('num_cpu = ')
- log_str.write(str(self.num_cpu))
- log_str.write(' ')
- if self.sys_mem_info is not None:
- log_str.write('sys_mem_info = ')
- log_str.write('<< ')
- log_str.write(self.sys_mem_info.log())
- log_str.write('>>')
- log_str.write(' ')
- if self.meminfo is not None:
- log_str.write('meminfo = ')
- log_str.write('<< ')
- log_str.write(self.meminfo.log())
- log_str.write('>>')
- log_str.write(' ')
- if self.cpuload is not None:
- log_str.write('cpuload = ')
- log_str.write('<< ')
- log_str.write(self.cpuload.log())
- log_str.write('>>')
- log_str.write(' ')
- if self.cpu_share is not None:
- log_str.write('cpu_share = ')
- log_str.write(str(self.cpu_share))
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class ProcessCpuInfo(object):
- """
- Attributes:
- - module_id
- - inst_id
- - mem_virt
- - cpu_share
- - mem_res
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'module_id', None, None, ), # 1
- (2, TType.STRING, 'inst_id', None, None, ), # 2
- (3, TType.U32, 'mem_virt', None, None, ), # 3
- (4, TType.DOUBLE, 'cpu_share', None, None, ), # 4
- (5, TType.U32, 'mem_res', None, None, ), # 5
- )
-
- def __init__(self, module_id=None, inst_id=None, mem_virt=None, cpu_share=None, mem_res=None,):
- self.module_id = module_id
- self.inst_id = inst_id
- self.mem_virt = mem_virt
- self.cpu_share = cpu_share
- self.mem_res = mem_res
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.module_id) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.STRING:
- (length, self.inst_id) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.U32:
- (length, self.mem_virt) = iprot.readU32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.DOUBLE:
- (length, self.cpu_share) = iprot.readDouble();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 5:
- if ftype == TType.U32:
- (length, self.mem_res) = iprot.readU32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('ProcessCpuInfo') < 0: return -1
- if self.module_id is not None:
- annotations = {}
- if oprot.writeFieldBegin('module_id', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.module_id) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.inst_id is not None:
- annotations = {}
- if oprot.writeFieldBegin('inst_id', TType.STRING, 2, annotations) < 0: return -1
- if oprot.writeString(self.inst_id) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.mem_virt is not None:
- annotations = {}
- if oprot.writeFieldBegin('mem_virt', TType.U32, 3, annotations) < 0: return -1
- if oprot.writeU32(self.mem_virt) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.cpu_share is not None:
- annotations = {}
- if oprot.writeFieldBegin('cpu_share', TType.DOUBLE, 4, annotations) < 0: return -1
- if oprot.writeDouble(self.cpu_share) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.mem_res is not None:
- annotations = {}
- if oprot.writeFieldBegin('mem_res', TType.U32, 5, annotations) < 0: return -1
- if oprot.writeU32(self.mem_res) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.module_id is not None:
- log_str.write('module_id = ')
- log_str.write(self.module_id)
- log_str.write(' ')
- if self.inst_id is not None:
- log_str.write('inst_id = ')
- log_str.write(self.inst_id)
- log_str.write(' ')
- if self.mem_virt is not None:
- log_str.write('mem_virt = ')
- log_str.write(str(self.mem_virt))
- log_str.write(' ')
- if self.cpu_share is not None:
- log_str.write('cpu_share = ')
- log_str.write(str(self.cpu_share))
- log_str.write(' ')
- if self.mem_res is not None:
- log_str.write('mem_res = ')
- log_str.write(str(self.mem_res))
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class CpuLoadInfoReq(sandesh_base.SandeshRequest):
-
- thrift_spec = (
- )
-
- def __init__(self, sandesh=sandesh_base.sandesh_global):
- sandesh_base.SandeshRequest.__init__(self)
- self._scope = sandesh.scope()
- self._module = sandesh.module()
- self._source = sandesh.source_id()
- self._node_type = sandesh.node_type()
- self._instance_id = sandesh.instance_id()
- self._seqnum = 0
- self._timestamp = UTCTimestampUsec()
- self._versionsig = 2471203225
- self._hints = 0
-
- @staticmethod
- def handle_http_request(sandesh=sandesh_base.sandesh_global):
- sandesh_req = CpuLoadInfoReq()
- if not sandesh_req:
- return SandeshHttp.http_error('Sandesh Request "CpuLoadInfoReq" not implemented')
- sandesh_req._context = bottle.request.url
- handle_req_fn = getattr(sandesh_req, "handle_request", None)
- if callable(handle_req_fn):
- handle_req_fn(sandesh_req)
- else:
- return SandeshHttp.http_error('Sandesh Request "CpuLoadInfoReq" not implemented')
- resp = SandeshHttp.get_http_response()
- if resp:
- return resp
- else:
- return SandeshHttp.http_error('No Response for Sandesh Request "CpuLoadInfoReq"')
-
- def log(self, trace=False):
- log_str = cStringIO.StringIO()
- return log_str.getvalue()
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return -1
- read_cnt = 0
- (length, sandesh_name) = iprot.readSandeshBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readSandeshEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeSandeshBegin('CpuLoadInfoReq') < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeSandeshEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def compare(self, other):
- if not isinstance(other, self.__class__):
- return False
- return True
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class CpuLoadInfoResp(sandesh_base.SandeshResponse):
-
- thrift_spec = (
- (0, TType.BOOL, 'more', None, None, ), # 0
- (1, TType.STRUCT, 'cpu_info', (CpuLoadInfo, CpuLoadInfo.thrift_spec), None, ), # 1
- )
-
- def __init__(self, cpu_info=None, more=None, sandesh=sandesh_base.sandesh_global):
- sandesh_base.SandeshResponse.__init__(self)
- self.cpu_info = cpu_info
- self.more = more
- self._scope = sandesh.scope()
- self._module = sandesh.module()
- self._source = sandesh.source_id()
- self._node_type = sandesh.node_type()
- self._instance_id = sandesh.instance_id()
- self._seqnum = 0
- self._timestamp = UTCTimestampUsec()
- self._versionsig = 4227955829
- self._hints = 0
-
- def log(self, trace=False):
- log_str = cStringIO.StringIO()
- if trace:
- log_str.write(str(self._timestamp))
- log_str.write(' ')
- log_str.write('CpuLoadInfoResp: ')
- if self.cpu_info is not None:
- log_str.write('cpu_info = ')
- log_str.write('<< ')
- log_str.write(self.cpu_info.log())
- log_str.write('>>')
- log_str.write(' ')
- if self.more is not None:
- log_str.write('more = ')
- if self.more:
- log_str.write('True')
- else:
- log_str.write('False')
- log_str.write(' ')
- return log_str.getvalue()
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return -1
- read_cnt = 0
- (length, sandesh_name) = iprot.readSandeshBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRUCT:
- self.cpu_info = CpuLoadInfo()
- read_cnt += self.cpu_info.read(iprot)
- else:
- iprot.skip(ftype)
- elif fid == 0:
- if ftype == TType.BOOL:
- (length, self.more) = iprot.readBool();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readSandeshEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeSandeshBegin('CpuLoadInfoResp') < 0: return -1
- if self.more is not None:
- annotations = {}
- if oprot.writeFieldBegin('more', TType.BOOL, 0, annotations) < 0: return -1
- if oprot.writeBool(self.more) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.cpu_info is not None:
- annotations = {}
- if oprot.writeFieldBegin('cpu_info', TType.STRUCT, 1, annotations) < 0: return -1
- if self.cpu_info.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeSandeshEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def compare(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.more != other.more:
- return False
- if self.cpu_info != other.cpu_info:
- return False
- return True
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-
-_SANDESH_REQUEST_LIST = [
-'CpuLoadInfoReq',
-]
-
-
-_SANDESH_UVE_LIST = [
-]
-
-
-_SANDESH_UVE_DATA_LIST = [
-]
-
-
-_SANDESH_ALARM_LIST = [
-]
-
-
-_SANDESH_ALARM_DATA_LIST = [
-]
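
The generated structs above follow a uniform pattern: read() and write() return a byte count on success and -1 on error, log() renders the populated fields as a flat string, and __eq__/__ne__ compare instance dictionaries rather than identities. A small usage sketch, again assuming the deleted module were still importable:

    from cfgm_common.uve.cfgm_cpuinfo.cpuinfo.ttypes import CpuLoadAvg

    a = CpuLoadAvg(one_min_avg=0.5, five_min_avg=0.4, fifteen_min_avg=0.3)
    b = CpuLoadAvg(one_min_avg=0.5, five_min_avg=0.4, fifteen_min_avg=0.3)

    print(a.log())  # 'one_min_avg = 0.5 five_min_avg = 0.4 fifteen_min_avg = 0.3 '
    print(a == b)   # True: equality compares __dict__, not object identity
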
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/ttypes.pyc b/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/ttypes.pyc
deleted file mode 100644
index 5cffc67..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/cpuinfo/ttypes.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/http_request.py b/Testcases/cfgm_common/uve/cfgm_cpuinfo/http_request.py
deleted file mode 100644
index 8baea4f..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/http_request.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-import ttypes
-
-_HTTP_REQUEST_LIST = [
-]
-
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/http_request.pyc b/Testcases/cfgm_common/uve/cfgm_cpuinfo/http_request.pyc
deleted file mode 100644
index 8dec371..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/http_request.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/index.html b/Testcases/cfgm_common/uve/cfgm_cpuinfo/index.html
deleted file mode 100644
index d9836a6..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/index.html
+++ /dev/null
@@ -1,14 +0,0 @@
-<html><head>
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>All Thrift declarations</title></head><body>
-<h1>All Thrift declarations</h1>
-<table><tr><th>Module</th><th>Services</th><th>Sandeshs</th><th>Data types</th><th>Constants</th></tr>
-<tr>
-<td>cfgm_cpuinfo</td><td></td></tr>
-<tr>
-<td>cpuinfo</td><td><a href="cpuinfo.html#Snh_CpuLoadInfoReq">CpuLoadInfoReq</a><br/>
-</td></tr>
-<tr>
-<td>process_info</td><td></td></tr>
-</table>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/__init__.py b/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/__init__.py
deleted file mode 100644
index adefd8e..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__all__ = ['ttypes', 'constants']
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/__init__.pyc b/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/__init__.pyc
deleted file mode 100644
index 34a8d06..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/constants.py b/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/constants.py
deleted file mode 100644
index 561413e..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/constants.py
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-from ttypes import *
-
-ConnectionTypeNames = {
- 0 : "Test",
- 1 : "IFMap",
- 2 : "XMPP",
- 3 : "Collector",
- 4 : "Database",
- 5 : "Redis",
- 6 : "Zookeeper",
- 7 : "Discovery",
- 8 : "ApiServer",
- 9 : "ToR",
-}
-ConnectionStatusNames = {
- 0 : "Initializing",
- 1 : "Down",
- 2 : "Up",
-}
-ProcessStateNames = {
- 0 : "Functional",
- 1 : "Non-Functional",
-}
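
The deleted constants module pairs the numeric enums defined in the sibling ttypes.py (ConnectionType, ConnectionStatus, ProcessState) with human-readable labels. A sketch of how those lookup tables translate raw values, using literal dictionaries copied from the file above so it runs standalone:

    # Copied from the deleted process_info/constants.py for illustration.
    ConnectionStatusNames = {0: "Initializing", 1: "Down", 2: "Up"}
    ProcessStateNames = {0: "Functional", 1: "Non-Functional"}

    # In ttypes.py, ProcessState.FUNCTIONAL == 0 and ConnectionStatus.UP == 2.
    print(ProcessStateNames[0])      # 'Functional'
    print(ConnectionStatusNames[2])  # 'Up'
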
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/constants.pyc b/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/constants.pyc
deleted file mode 100644
index a13a91f..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/constants.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/http_request.py b/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/http_request.py
deleted file mode 100644
index 8baea4f..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/http_request.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-import ttypes
-
-_HTTP_REQUEST_LIST = [
-]
-
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/http_request.pyc b/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/http_request.pyc
deleted file mode 100644
index 4348a49..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/http_request.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/index.html b/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/index.html
deleted file mode 100644
index 9550828..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/index.html
+++ /dev/null
@@ -1,9 +0,0 @@
-<html><head>
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>All Thrift declarations</title></head><body>
-<h1>All Thrift declarations</h1>
-<table><tr><th>Module</th><th>Services</th><th>Sandeshs</th><th>Data types</th><th>Constants</th></tr>
-<tr>
-<td>process_info</td><td></td></tr>
-</table>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/process_info.html b/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/process_info.html
deleted file mode 100644
index 2a1230f..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/process_info.html
+++ /dev/null
@@ -1,13 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml">
-<head>
-<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>Module: process_info</title></head><body>
-<h1>Module: process_info</h1>
-<table><tr><th>Module</th><th>Sandeshs</th></tr>
-<tr>
-<td>process_info</td><td></td></tr>
-</table>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/process_info.xml b/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/process_info.xml
deleted file mode 100644
index ed715d3..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/process_info.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<?xml-stylesheet type="text/xsl" href="/universal_parse.xsl"?>
-<process_info type="rlist">
-</process_info>
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/request_skeleton.py b/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/request_skeleton.py
deleted file mode 100644
index 99c1196..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/request_skeleton.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-
-# This autogenerated skeleton file illustrates the implementation of
-# derived class to handle the sandesh request.
-
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/request_skeleton.pyc b/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/request_skeleton.pyc
deleted file mode 100644
index 11f0c97..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/request_skeleton.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/style.css b/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/style.css
deleted file mode 100644
index 6dc2f22..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/style.css
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Auto-generated CSS for generated Thrift docs */
-body { font-family: Tahoma, sans-serif; }
-pre { background-color: #dddddd; padding: 6px; }
-h3,h4 { padding-top: 0px; margin-top: 0px; }
-div.definition { border: 1px solid gray; margin: 10px; padding: 10px; }
-div.extends { margin: -0.5em 0 1em 5em }
-table { border: 1px solid grey; border-collapse: collapse; }
-td { border: 1px solid grey; padding: 1px 6px; vertical-align: top; }
-th { border: 1px solid black; background-color: #bbbbbb;
- text-align: left; padding: 1px 6px; }
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/ttypes.py b/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/ttypes.py
deleted file mode 100644
index 5c12c29..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/ttypes.py
+++ /dev/null
@@ -1,854 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-
-from pysandesh.transport import TTransport
-from pysandesh.protocol import TBinaryProtocol, TProtocol
-try:
- from pysandesh.protocol import fastbinary
-except:
- fastbinary = None
-
-import cStringIO
-import uuid
-import bottle
-from pysandesh import sandesh_base
-from pysandesh.sandesh_http import SandeshHttp
-from pysandesh.sandesh_uve import SandeshUVETypeMaps
-from pysandesh.util import UTCTimestampUsec, UTCTimestampUsecToString
-from pysandesh.gen_py.sandesh.constants import *
-
-
-class ConnectionType(object):
- TEST = 0
- IFMAP = 1
- XMPP = 2
- COLLECTOR = 3
- DATABASE = 4
- REDIS = 5
- ZOOKEEPER = 6
- DISCOVERY = 7
- APISERVER = 8
- TOR = 9
-
- _VALUES_TO_NAMES = {
- 0: "TEST",
- 1: "IFMAP",
- 2: "XMPP",
- 3: "COLLECTOR",
- 4: "DATABASE",
- 5: "REDIS",
- 6: "ZOOKEEPER",
- 7: "DISCOVERY",
- 8: "APISERVER",
- 9: "TOR",
- }
-
- _NAMES_TO_VALUES = {
- "TEST": 0,
- "IFMAP": 1,
- "XMPP": 2,
- "COLLECTOR": 3,
- "DATABASE": 4,
- "REDIS": 5,
- "ZOOKEEPER": 6,
- "DISCOVERY": 7,
- "APISERVER": 8,
- "TOR": 9,
- }
-
-class ConnectionStatus(object):
- INIT = 0
- DOWN = 1
- UP = 2
-
- _VALUES_TO_NAMES = {
- 0: "INIT",
- 1: "DOWN",
- 2: "UP",
- }
-
- _NAMES_TO_VALUES = {
- "INIT": 0,
- "DOWN": 1,
- "UP": 2,
- }
-
-class ProcessState(object):
- FUNCTIONAL = 0
- NON_FUNCTIONAL = 1
-
- _VALUES_TO_NAMES = {
- 0: "FUNCTIONAL",
- 1: "NON_FUNCTIONAL",
- }
-
- _NAMES_TO_VALUES = {
- "FUNCTIONAL": 0,
- "NON_FUNCTIONAL": 1,
- }
-
-
-class ConnectionInfo(object):
- """
- Attributes:
- - type
- - name
- - server_addrs
- - status
- - description
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'type', None, None, ), # 1
- (2, TType.STRING, 'name', None, None, ), # 2
- (3, TType.LIST, 'server_addrs', (TType.STRING,None), None, ), # 3
- (4, TType.STRING, 'status', None, None, ), # 4
- (5, TType.STRING, 'description', None, None, ), # 5
- )
-
- def __init__(self, type=None, name=None, server_addrs=None, status=None, description=None,):
- self.type = type
- self.name = name
- self.server_addrs = server_addrs
- self.status = status
- self.description = description
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.type) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.STRING:
- (length, self.name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.LIST:
- self.server_addrs = []
- (length, _etype3, _size0) = iprot.readListBegin()
- read_cnt += length
- for _i4 in xrange(_size0):
- read_cnt += iprot.readContainerElementBegin()
- (length, _elem5) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- self.server_addrs.append(_elem5)
- read_cnt += iprot.readContainerElementEnd()
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.STRING:
- (length, self.status) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 5:
- if ftype == TType.STRING:
- (length, self.description) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('ConnectionInfo') < 0: return -1
- if self.type is not None:
- annotations = {}
- if oprot.writeFieldBegin('type', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.type) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.name is not None:
- annotations = {}
- if oprot.writeFieldBegin('name', TType.STRING, 2, annotations) < 0: return -1
- if oprot.writeString(self.name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.server_addrs is not None:
- annotations = {}
- if oprot.writeFieldBegin('server_addrs', TType.LIST, 3, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRING, len(self.server_addrs)) < 0: return -1
- for iter6 in self.server_addrs:
- if oprot.writeContainerElementBegin() < 0: return -1
- if oprot.writeString(iter6) < 0: return -1
- if oprot.writeContainerElementEnd() < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.status is not None:
- annotations = {}
- if oprot.writeFieldBegin('status', TType.STRING, 4, annotations) < 0: return -1
- if oprot.writeString(self.status) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.description is not None:
- annotations = {}
- if oprot.writeFieldBegin('description', TType.STRING, 5, annotations) < 0: return -1
- if oprot.writeString(self.description) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.type is not None:
- log_str.write('type = ')
- log_str.write(self.type)
- log_str.write(' ')
- if self.name is not None:
- log_str.write('name = ')
- log_str.write(self.name)
- log_str.write(' ')
- if self.server_addrs is not None:
- log_str.write('server_addrs = ')
- log_str.write('[ ')
- for iter7 in self.server_addrs:
- log_str.write(iter7)
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- if self.status is not None:
- log_str.write('status = ')
- log_str.write(self.status)
- log_str.write(' ')
- if self.description is not None:
- log_str.write('description = ')
- log_str.write(self.description)
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class ProcessStatus(object):
- """
- Attributes:
- - module_id
- - instance_id
- - state
- - connection_infos
- - description
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'module_id', None, None, ), # 1
- (2, TType.STRING, 'instance_id', None, None, ), # 2
- (3, TType.STRING, 'state', None, None, ), # 3
- (4, TType.LIST, 'connection_infos', (TType.STRUCT,(ConnectionInfo, ConnectionInfo.thrift_spec)), None, ), # 4
- (5, TType.STRING, 'description', None, None, ), # 5
- )
-
- def __init__(self, module_id=None, instance_id=None, state=None, connection_infos=None, description=None,):
- self.module_id = module_id
- self.instance_id = instance_id
- self.state = state
- self.connection_infos = connection_infos
- self.description = description
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.module_id) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.STRING:
- (length, self.instance_id) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.STRING:
- (length, self.state) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.LIST:
- self.connection_infos = []
- (length, _etype11, _size8) = iprot.readListBegin()
- read_cnt += length
- for _i12 in xrange(_size8):
- _elem13 = ConnectionInfo()
- read_cnt += _elem13.read(iprot)
- self.connection_infos.append(_elem13)
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- elif fid == 5:
- if ftype == TType.STRING:
- (length, self.description) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('ProcessStatus') < 0: return -1
- if self.module_id is not None:
- annotations = {}
- if oprot.writeFieldBegin('module_id', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.module_id) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.instance_id is not None:
- annotations = {}
- if oprot.writeFieldBegin('instance_id', TType.STRING, 2, annotations) < 0: return -1
- if oprot.writeString(self.instance_id) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.state is not None:
- annotations = {}
- if oprot.writeFieldBegin('state', TType.STRING, 3, annotations) < 0: return -1
- if oprot.writeString(self.state) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.connection_infos is not None:
- annotations = {}
- if oprot.writeFieldBegin('connection_infos', TType.LIST, 4, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRUCT, len(self.connection_infos)) < 0: return -1
- for iter14 in self.connection_infos:
- if iter14.write(oprot) < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.description is not None:
- annotations = {}
- if oprot.writeFieldBegin('description', TType.STRING, 5, annotations) < 0: return -1
- if oprot.writeString(self.description) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.module_id is not None:
- log_str.write('module_id = ')
- log_str.write(self.module_id)
- log_str.write(' ')
- if self.instance_id is not None:
- log_str.write('instance_id = ')
- log_str.write(self.instance_id)
- log_str.write(' ')
- if self.state is not None:
- log_str.write('state = ')
- log_str.write(self.state)
- log_str.write(' ')
- if self.connection_infos is not None:
- log_str.write('connection_infos = ')
- log_str.write('[ ')
- for iter15 in self.connection_infos:
- log_str.write('<< ')
- log_str.write(iter15.log())
- log_str.write('>>')
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- if self.description is not None:
- log_str.write('description = ')
- log_str.write(self.description)
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class DiskPartitionUsageStats(object):
- """
- Attributes:
- - partition_type
- - partition_name
- - partition_space_used_1k
- - partition_space_available_1k
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'partition_type', None, None, ), # 1
- (2, TType.STRING, 'partition_name', None, None, ), # 2
- (3, TType.U64, 'partition_space_used_1k', None, None, ), # 3
- (4, TType.U64, 'partition_space_available_1k', None, None, ), # 4
- )
-
- def __init__(self, partition_type=None, partition_name=None, partition_space_used_1k=None, partition_space_available_1k=None,):
- self.partition_type = partition_type
- self.partition_name = partition_name
- self.partition_space_used_1k = partition_space_used_1k
- self.partition_space_available_1k = partition_space_available_1k
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.partition_type) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.STRING:
- (length, self.partition_name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.U64:
- (length, self.partition_space_used_1k) = iprot.readU64();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.U64:
- (length, self.partition_space_available_1k) = iprot.readU64();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('DiskPartitionUsageStats') < 0: return -1
- if self.partition_type is not None:
- annotations = {}
- if oprot.writeFieldBegin('partition_type', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.partition_type) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.partition_name is not None:
- annotations = {}
- if oprot.writeFieldBegin('partition_name', TType.STRING, 2, annotations) < 0: return -1
- if oprot.writeString(self.partition_name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.partition_space_used_1k is not None:
- annotations = {}
- if oprot.writeFieldBegin('partition_space_used_1k', TType.U64, 3, annotations) < 0: return -1
- if oprot.writeU64(self.partition_space_used_1k) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.partition_space_available_1k is not None:
- annotations = {}
- if oprot.writeFieldBegin('partition_space_available_1k', TType.U64, 4, annotations) < 0: return -1
- if oprot.writeU64(self.partition_space_available_1k) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.partition_type is not None:
- log_str.write('partition_type = ')
- log_str.write(self.partition_type)
- log_str.write(' ')
- if self.partition_name is not None:
- log_str.write('partition_name = ')
- log_str.write(self.partition_name)
- log_str.write(' ')
- if self.partition_space_used_1k is not None:
- log_str.write('partition_space_used_1k = ')
- log_str.write(str(self.partition_space_used_1k))
- log_str.write(' ')
- if self.partition_space_available_1k is not None:
- log_str.write('partition_space_available_1k = ')
- log_str.write(str(self.partition_space_available_1k))
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class ProcessInfo(object):
- """
- Attributes:
- - process_name
- - process_state
- - start_count
- - stop_count
- - exit_count
- - last_start_time
- - last_stop_time
- - last_exit_time
- - core_file_list
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'process_name', None, None, ), # 1
- (2, TType.STRING, 'process_state', None, None, ), # 2
- (3, TType.U32, 'start_count', None, None, ), # 3
- (4, TType.U32, 'stop_count', None, None, ), # 4
- (5, TType.U32, 'exit_count', None, None, ), # 5
- (6, TType.STRING, 'last_start_time', None, None, ), # 6
- (7, TType.STRING, 'last_stop_time', None, None, ), # 7
- (8, TType.STRING, 'last_exit_time', None, None, ), # 8
- (9, TType.LIST, 'core_file_list', (TType.STRING,None), None, ), # 9
- )
-
- def __init__(self, process_name=None, process_state=None, start_count=None, stop_count=None, exit_count=None, last_start_time=None, last_stop_time=None, last_exit_time=None, core_file_list=None,):
- self.process_name = process_name
- self.process_state = process_state
- self.start_count = start_count
- self.stop_count = stop_count
- self.exit_count = exit_count
- self.last_start_time = last_start_time
- self.last_stop_time = last_stop_time
- self.last_exit_time = last_exit_time
- self.core_file_list = core_file_list
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.process_name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.STRING:
- (length, self.process_state) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.U32:
- (length, self.start_count) = iprot.readU32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.U32:
- (length, self.stop_count) = iprot.readU32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 5:
- if ftype == TType.U32:
- (length, self.exit_count) = iprot.readU32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 6:
- if ftype == TType.STRING:
- (length, self.last_start_time) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 7:
- if ftype == TType.STRING:
- (length, self.last_stop_time) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 8:
- if ftype == TType.STRING:
- (length, self.last_exit_time) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 9:
- if ftype == TType.LIST:
- self.core_file_list = []
- (length, _etype19, _size16) = iprot.readListBegin()
- read_cnt += length
- for _i20 in xrange(_size16):
- read_cnt += iprot.readContainerElementBegin()
- (length, _elem21) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- self.core_file_list.append(_elem21)
- read_cnt += iprot.readContainerElementEnd()
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('ProcessInfo') < 0: return -1
- if self.process_name is not None:
- annotations = {}
- if oprot.writeFieldBegin('process_name', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.process_name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.process_state is not None:
- annotations = {}
- if oprot.writeFieldBegin('process_state', TType.STRING, 2, annotations) < 0: return -1
- if oprot.writeString(self.process_state) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.start_count is not None:
- annotations = {}
- if oprot.writeFieldBegin('start_count', TType.U32, 3, annotations) < 0: return -1
- if oprot.writeU32(self.start_count) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.stop_count is not None:
- annotations = {}
- if oprot.writeFieldBegin('stop_count', TType.U32, 4, annotations) < 0: return -1
- if oprot.writeU32(self.stop_count) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.exit_count is not None:
- annotations = {}
- if oprot.writeFieldBegin('exit_count', TType.U32, 5, annotations) < 0: return -1
- if oprot.writeU32(self.exit_count) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.last_start_time is not None:
- annotations = {}
- if oprot.writeFieldBegin('last_start_time', TType.STRING, 6, annotations) < 0: return -1
- if oprot.writeString(self.last_start_time) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.last_stop_time is not None:
- annotations = {}
- if oprot.writeFieldBegin('last_stop_time', TType.STRING, 7, annotations) < 0: return -1
- if oprot.writeString(self.last_stop_time) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.last_exit_time is not None:
- annotations = {}
- if oprot.writeFieldBegin('last_exit_time', TType.STRING, 8, annotations) < 0: return -1
- if oprot.writeString(self.last_exit_time) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.core_file_list is not None:
- annotations = {}
- if oprot.writeFieldBegin('core_file_list', TType.LIST, 9, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRING, len(self.core_file_list)) < 0: return -1
- for iter22 in self.core_file_list:
- if oprot.writeContainerElementBegin() < 0: return -1
- if oprot.writeString(iter22) < 0: return -1
- if oprot.writeContainerElementEnd() < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.process_name is not None:
- log_str.write('process_name = ')
- log_str.write(self.process_name)
- log_str.write(' ')
- if self.process_state is not None:
- log_str.write('process_state = ')
- log_str.write(self.process_state)
- log_str.write(' ')
- if self.start_count is not None:
- log_str.write('start_count = ')
- log_str.write(str(self.start_count))
- log_str.write(' ')
- if self.stop_count is not None:
- log_str.write('stop_count = ')
- log_str.write(str(self.stop_count))
- log_str.write(' ')
- if self.exit_count is not None:
- log_str.write('exit_count = ')
- log_str.write(str(self.exit_count))
- log_str.write(' ')
- if self.last_start_time is not None:
- log_str.write('last_start_time = ')
- log_str.write(self.last_start_time)
- log_str.write(' ')
- if self.last_stop_time is not None:
- log_str.write('last_stop_time = ')
- log_str.write(self.last_stop_time)
- log_str.write(' ')
- if self.last_exit_time is not None:
- log_str.write('last_exit_time = ')
- log_str.write(self.last_exit_time)
- log_str.write(' ')
- if self.core_file_list is not None:
- log_str.write('core_file_list = ')
- log_str.write('[ ')
- for iter23 in self.core_file_list:
- log_str.write(iter23)
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-
-_SANDESH_REQUEST_LIST = [
-]
-
-
-_SANDESH_UVE_LIST = [
-]
-
-
-_SANDESH_UVE_DATA_LIST = [
-]
-
-
-_SANDESH_ALARM_LIST = [
-]
-
-
-_SANDESH_ALARM_DATA_LIST = [
-]
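
A brief usage sketch of the process_info structs defined in the deleted file above, assuming a Python 2 environment with pysandesh importable and the Testcases directory on sys.path (the generated module depends on cStringIO and xrange); the class names, keyword arguments, and module path are taken from the deleted file itself, while the concrete values are illustrative placeholders only.

from cfgm_common.uve.cfgm_cpuinfo.process_info.ttypes import (
    ProcessInfo, DiskPartitionUsageStats)

# Populate only a subset of fields; unset fields stay None and are skipped
# by both write() and log().
proc = ProcessInfo(process_name='contrail-api',
                   process_state='PROCESS_STATE_RUNNING',  # placeholder state string
                   start_count=1, stop_count=0, exit_count=0)
disk = DiskPartitionUsageStats(partition_type='ext4', partition_name='/dev/vda1',
                               partition_space_used_1k=4096,
                               partition_space_available_1k=102400)

# log() renders the populated fields as a flat 'name = value' string.
print(proc.log())
print(disk.log())
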
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/ttypes.pyc b/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/ttypes.pyc
deleted file mode 100644
index a07afd4..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/process_info/ttypes.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/request_skeleton.py b/Testcases/cfgm_common/uve/cfgm_cpuinfo/request_skeleton.py
deleted file mode 100644
index 99c1196..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/request_skeleton.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-
-# This autogenerated skeleton file illustrates the implementation of
-# derived class to handle the sandesh request.
-
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/request_skeleton.pyc b/Testcases/cfgm_common/uve/cfgm_cpuinfo/request_skeleton.pyc
deleted file mode 100644
index 0ba81c0..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/request_skeleton.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/style.css b/Testcases/cfgm_common/uve/cfgm_cpuinfo/style.css
deleted file mode 100644
index 6dc2f22..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/style.css
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Auto-generated CSS for generated Thrift docs */
-body { font-family: Tahoma, sans-serif; }
-pre { background-color: #dddddd; padding: 6px; }
-h3,h4 { padding-top: 0px; margin-top: 0px; }
-div.definition { border: 1px solid gray; margin: 10px; padding: 10px; }
-div.extends { margin: -0.5em 0 1em 5em }
-table { border: 1px solid grey; border-collapse: collapse; }
-td { border: 1px solid grey; padding: 1px 6px; vertical-align: top; }
-th { border: 1px solid black; background-color: #bbbbbb;
- text-align: left; padding: 1px 6px; }
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/ttypes.py b/Testcases/cfgm_common/uve/cfgm_cpuinfo/ttypes.py
deleted file mode 100644
index ce6e3ae..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/ttypes.py
+++ /dev/null
@@ -1,1281 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-import cpuinfo.ttypes
-import process_info.ttypes
-
-
-from pysandesh.transport import TTransport
-from pysandesh.protocol import TBinaryProtocol, TProtocol
-try:
- from pysandesh.protocol import fastbinary
-except:
- fastbinary = None
-
-import cStringIO
-import uuid
-import bottle
-from pysandesh import sandesh_base
-from pysandesh.sandesh_http import SandeshHttp
-from pysandesh.sandesh_uve import SandeshUVETypeMaps
-from pysandesh.util import UTCTimestampUsec, UTCTimestampUsecToString
-from pysandesh.gen_py.sandesh.constants import *
-
-
-
-class ModuleCpuInfo(object):
- """
- Attributes:
- - module_id
- - cpu_info
- - instance_id
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'module_id', None, None, ), # 1
- (2, TType.STRUCT, 'cpu_info', (cpuinfo.ttypes.CpuLoadInfo, cpuinfo.ttypes.CpuLoadInfo.thrift_spec), None, ), # 2
- (3, TType.STRING, 'instance_id', None, None, ), # 3
- )
-
- def __init__(self, module_id=None, cpu_info=None, instance_id=None,):
- self.module_id = module_id
- self.cpu_info = cpu_info
- self.instance_id = instance_id
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.module_id) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.STRUCT:
- self.cpu_info = cpuinfo.ttypes.CpuLoadInfo()
- read_cnt += self.cpu_info.read(iprot)
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.STRING:
- (length, self.instance_id) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('ModuleCpuInfo') < 0: return -1
- if self.module_id is not None:
- annotations = {}
- if oprot.writeFieldBegin('module_id', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.module_id) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.cpu_info is not None:
- annotations = {}
- if oprot.writeFieldBegin('cpu_info', TType.STRUCT, 2, annotations) < 0: return -1
- if self.cpu_info.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.instance_id is not None:
- annotations = {}
- if oprot.writeFieldBegin('instance_id', TType.STRING, 3, annotations) < 0: return -1
- if oprot.writeString(self.instance_id) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.module_id is not None:
- log_str.write('module_id = ')
- log_str.write(self.module_id)
- log_str.write(' ')
- if self.cpu_info is not None:
- log_str.write('cpu_info = ')
- log_str.write('<< ')
- log_str.write(self.cpu_info.log())
- log_str.write('>>')
- log_str.write(' ')
- if self.instance_id is not None:
- log_str.write('instance_id = ')
- log_str.write(self.instance_id)
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class ModuleCpuState(object):
- """
- Attributes:
- - name
- - deleted
- - module_cpu_info
- - build_info
- - config_node_ip
- - api_server_cpu_share
- - schema_xmer_cpu_share
- - service_monitor_cpu_share
- - api_server_mem_virt
- - schema_xmer_mem_virt
- - service_monitor_mem_virt
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'name', None, None, ), # 1
- (2, TType.BOOL, 'deleted', None, None, ), # 2
- (3, TType.LIST, 'module_cpu_info', (TType.STRUCT,(ModuleCpuInfo, ModuleCpuInfo.thrift_spec)), None, ), # 3
- (4, TType.STRING, 'build_info', None, None, ), # 4
- (5, TType.LIST, 'config_node_ip', (TType.STRING,None), None, ), # 5
- (6, TType.DOUBLE, 'api_server_cpu_share', None, None, ), # 6
- (7, TType.DOUBLE, 'schema_xmer_cpu_share', None, None, ), # 7
- (8, TType.DOUBLE, 'service_monitor_cpu_share', None, None, ), # 8
- (9, TType.U32, 'api_server_mem_virt', None, None, ), # 9
- (10, TType.U32, 'schema_xmer_mem_virt', None, None, ), # 10
- (11, TType.U32, 'service_monitor_mem_virt', None, None, ), # 11
- )
-
- def __init__(self, name=None, deleted=None, module_cpu_info=None, build_info=None, config_node_ip=None, api_server_cpu_share=None, schema_xmer_cpu_share=None, service_monitor_cpu_share=None, api_server_mem_virt=None, schema_xmer_mem_virt=None, service_monitor_mem_virt=None,):
- self.name = name
- self.deleted = deleted
- self.module_cpu_info = module_cpu_info
- self.build_info = build_info
- self.config_node_ip = config_node_ip
- self.api_server_cpu_share = api_server_cpu_share
- self.schema_xmer_cpu_share = schema_xmer_cpu_share
- self.service_monitor_cpu_share = service_monitor_cpu_share
- self.api_server_mem_virt = api_server_mem_virt
- self.schema_xmer_mem_virt = schema_xmer_mem_virt
- self.service_monitor_mem_virt = service_monitor_mem_virt
- self._table = 'ObjectConfigNode'
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.BOOL:
- (length, self.deleted) = iprot.readBool();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.LIST:
- self.module_cpu_info = []
- (length, _etype3, _size0) = iprot.readListBegin()
- read_cnt += length
- for _i4 in xrange(_size0):
- _elem5 = ModuleCpuInfo()
- read_cnt += _elem5.read(iprot)
- self.module_cpu_info.append(_elem5)
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.STRING:
- (length, self.build_info) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 5:
- if ftype == TType.LIST:
- self.config_node_ip = []
- (length, _etype9, _size6) = iprot.readListBegin()
- read_cnt += length
- for _i10 in xrange(_size6):
- read_cnt += iprot.readContainerElementBegin()
- (length, _elem11) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- self.config_node_ip.append(_elem11)
- read_cnt += iprot.readContainerElementEnd()
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- elif fid == 6:
- if ftype == TType.DOUBLE:
- (length, self.api_server_cpu_share) = iprot.readDouble();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 7:
- if ftype == TType.DOUBLE:
- (length, self.schema_xmer_cpu_share) = iprot.readDouble();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 8:
- if ftype == TType.DOUBLE:
- (length, self.service_monitor_cpu_share) = iprot.readDouble();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 9:
- if ftype == TType.U32:
- (length, self.api_server_mem_virt) = iprot.readU32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 10:
- if ftype == TType.U32:
- (length, self.schema_xmer_mem_virt) = iprot.readU32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 11:
- if ftype == TType.U32:
- (length, self.service_monitor_mem_virt) = iprot.readU32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('ModuleCpuState') < 0: return -1
- if self.name is not None:
- annotations = {}
- if self._table is None or self._table is '': return -1
- annotations['key'] = self._table
- if oprot.writeFieldBegin('name', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.deleted is not None:
- annotations = {}
- if oprot.writeFieldBegin('deleted', TType.BOOL, 2, annotations) < 0: return -1
- if oprot.writeBool(self.deleted) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.module_cpu_info is not None:
- annotations = {}
- annotations['aggtype'] = 'union'
- if oprot.writeFieldBegin('module_cpu_info', TType.LIST, 3, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRUCT, len(self.module_cpu_info)) < 0: return -1
- for iter12 in self.module_cpu_info:
- if iter12.write(oprot) < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.build_info is not None:
- annotations = {}
- if oprot.writeFieldBegin('build_info', TType.STRING, 4, annotations) < 0: return -1
- if oprot.writeString(self.build_info) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.config_node_ip is not None:
- annotations = {}
- if oprot.writeFieldBegin('config_node_ip', TType.LIST, 5, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRING, len(self.config_node_ip)) < 0: return -1
- for iter13 in self.config_node_ip:
- if oprot.writeContainerElementBegin() < 0: return -1
- if oprot.writeString(iter13) < 0: return -1
- if oprot.writeContainerElementEnd() < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.api_server_cpu_share is not None:
- annotations = {}
- annotations['aggtype'] = 'stats'
- if oprot.writeFieldBegin('api_server_cpu_share', TType.DOUBLE, 6, annotations) < 0: return -1
- if oprot.writeDouble(self.api_server_cpu_share) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.schema_xmer_cpu_share is not None:
- annotations = {}
- annotations['aggtype'] = 'stats'
- if oprot.writeFieldBegin('schema_xmer_cpu_share', TType.DOUBLE, 7, annotations) < 0: return -1
- if oprot.writeDouble(self.schema_xmer_cpu_share) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.service_monitor_cpu_share is not None:
- annotations = {}
- annotations['aggtype'] = 'stats'
- if oprot.writeFieldBegin('service_monitor_cpu_share', TType.DOUBLE, 8, annotations) < 0: return -1
- if oprot.writeDouble(self.service_monitor_cpu_share) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.api_server_mem_virt is not None:
- annotations = {}
- annotations['aggtype'] = 'stats'
- annotations['hbin'] = '100000'
- if oprot.writeFieldBegin('api_server_mem_virt', TType.U32, 9, annotations) < 0: return -1
- if oprot.writeU32(self.api_server_mem_virt) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.schema_xmer_mem_virt is not None:
- annotations = {}
- annotations['aggtype'] = 'stats'
- annotations['hbin'] = '100000'
- if oprot.writeFieldBegin('schema_xmer_mem_virt', TType.U32, 10, annotations) < 0: return -1
- if oprot.writeU32(self.schema_xmer_mem_virt) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.service_monitor_mem_virt is not None:
- annotations = {}
- annotations['aggtype'] = 'stats'
- annotations['hbin'] = '100000'
- if oprot.writeFieldBegin('service_monitor_mem_virt', TType.U32, 11, annotations) < 0: return -1
- if oprot.writeU32(self.service_monitor_mem_virt) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.name is not None:
- log_str.write('name = ')
- log_str.write(self.name)
- log_str.write(' ')
- if self.deleted is not None:
- log_str.write('deleted = ')
- if self.deleted:
- log_str.write('True')
- else:
- log_str.write('False')
- log_str.write(' ')
- if self.module_cpu_info is not None:
- log_str.write('module_cpu_info = ')
- log_str.write('[ ')
- for iter14 in self.module_cpu_info:
- log_str.write('<< ')
- log_str.write(iter14.log())
- log_str.write('>>')
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- if self.build_info is not None:
- log_str.write('build_info = ')
- log_str.write(self.build_info)
- log_str.write(' ')
- if self.config_node_ip is not None:
- log_str.write('config_node_ip = ')
- log_str.write('[ ')
- for iter15 in self.config_node_ip:
- log_str.write(iter15)
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- if self.api_server_cpu_share is not None:
- log_str.write('api_server_cpu_share = ')
- log_str.write(str(self.api_server_cpu_share))
- log_str.write(' ')
- if self.schema_xmer_cpu_share is not None:
- log_str.write('schema_xmer_cpu_share = ')
- log_str.write(str(self.schema_xmer_cpu_share))
- log_str.write(' ')
- if self.service_monitor_cpu_share is not None:
- log_str.write('service_monitor_cpu_share = ')
- log_str.write(str(self.service_monitor_cpu_share))
- log_str.write(' ')
- if self.api_server_mem_virt is not None:
- log_str.write('api_server_mem_virt = ')
- log_str.write(str(self.api_server_mem_virt))
- log_str.write(' ')
- if self.schema_xmer_mem_virt is not None:
- log_str.write('schema_xmer_mem_virt = ')
- log_str.write(str(self.schema_xmer_mem_virt))
- log_str.write(' ')
- if self.service_monitor_mem_virt is not None:
- log_str.write('service_monitor_mem_virt = ')
- log_str.write(str(self.service_monitor_mem_virt))
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class ConfigCpuState(object):
- """
- Attributes:
- - name
- - deleted
- - cpu_info
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'name', None, None, ), # 1
- (2, TType.BOOL, 'deleted', None, None, ), # 2
- (3, TType.LIST, 'cpu_info', (TType.STRUCT,(cpuinfo.ttypes.ProcessCpuInfo, cpuinfo.ttypes.ProcessCpuInfo.thrift_spec)), None, ), # 3
- )
-
- def __init__(self, name=None, deleted=None, cpu_info=None,):
- self.name = name
- self.deleted = deleted
- self.cpu_info = cpu_info
- self._table = 'ObjectConfigNode'
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.BOOL:
- (length, self.deleted) = iprot.readBool();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.LIST:
- self.cpu_info = []
- (length, _etype19, _size16) = iprot.readListBegin()
- read_cnt += length
- for _i20 in xrange(_size16):
- _elem21 = cpuinfo.ttypes.ProcessCpuInfo()
- read_cnt += _elem21.read(iprot)
- self.cpu_info.append(_elem21)
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('ConfigCpuState') < 0: return -1
- if self.name is not None:
- annotations = {}
- if self._table is None or self._table is '': return -1
- annotations['key'] = self._table
- if oprot.writeFieldBegin('name', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.deleted is not None:
- annotations = {}
- if oprot.writeFieldBegin('deleted', TType.BOOL, 2, annotations) < 0: return -1
- if oprot.writeBool(self.deleted) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.cpu_info is not None:
- annotations = {}
- annotations['aggtype'] = 'union'
- annotations['tags'] = '.module_id,.mem_virt,.cpu_share,.mem_res'
- if oprot.writeFieldBegin('cpu_info', TType.LIST, 3, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRUCT, len(self.cpu_info)) < 0: return -1
- for iter22 in self.cpu_info:
- if iter22.write(oprot) < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.name is not None:
- log_str.write('name = ')
- log_str.write(self.name)
- log_str.write(' ')
- if self.deleted is not None:
- log_str.write('deleted = ')
- if self.deleted:
- log_str.write('True')
- else:
- log_str.write('False')
- log_str.write(' ')
- if self.cpu_info is not None:
- log_str.write('cpu_info = ')
- log_str.write('[ ')
- for iter23 in self.cpu_info:
- log_str.write('<< ')
- log_str.write(iter23.log())
- log_str.write('>>')
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class NodeStatus(object):
- """
- Attributes:
- - name
- - deleted
- - status
- - process_status
- - process_info
- - disk_usage_info
- - description
- - all_core_file_list
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'name', None, None, ), # 1
- (2, TType.BOOL, 'deleted', None, None, ), # 2
- (3, TType.STRING, 'status', None, None, ), # 3
- (4, TType.LIST, 'process_status', (TType.STRUCT,(process_info.ttypes.ProcessStatus, process_info.ttypes.ProcessStatus.thrift_spec)), None, ), # 4
- (5, TType.LIST, 'process_info', (TType.STRUCT,(process_info.ttypes.ProcessInfo, process_info.ttypes.ProcessInfo.thrift_spec)), None, ), # 5
- (6, TType.LIST, 'disk_usage_info', (TType.STRUCT,(process_info.ttypes.DiskPartitionUsageStats, process_info.ttypes.DiskPartitionUsageStats.thrift_spec)), None, ), # 6
- (7, TType.STRING, 'description', None, None, ), # 7
- (8, TType.LIST, 'all_core_file_list', (TType.STRING,None), None, ), # 8
- )
-
- def __init__(self, name=None, deleted=None, status=None, process_status=None, process_info=None, disk_usage_info=None, description=None, all_core_file_list=None,):
- self.name = name
- self.deleted = deleted
- self.status = status
- self.process_status = process_status
- self.process_info = process_info
- self.disk_usage_info = disk_usage_info
- self.description = description
- self.all_core_file_list = all_core_file_list
- self._table = 'ObjectConfigNode'
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.BOOL:
- (length, self.deleted) = iprot.readBool();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.STRING:
- (length, self.status) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.LIST:
- self.process_status = []
- (length, _etype27, _size24) = iprot.readListBegin()
- read_cnt += length
- for _i28 in xrange(_size24):
- _elem29 = process_info.ttypes.ProcessStatus()
- read_cnt += _elem29.read(iprot)
- self.process_status.append(_elem29)
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- elif fid == 5:
- if ftype == TType.LIST:
- self.process_info = []
- (length, _etype33, _size30) = iprot.readListBegin()
- read_cnt += length
- for _i34 in xrange(_size30):
- _elem35 = process_info.ttypes.ProcessInfo()
- read_cnt += _elem35.read(iprot)
- self.process_info.append(_elem35)
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- elif fid == 6:
- if ftype == TType.LIST:
- self.disk_usage_info = []
- (length, _etype39, _size36) = iprot.readListBegin()
- read_cnt += length
- for _i40 in xrange(_size36):
- _elem41 = process_info.ttypes.DiskPartitionUsageStats()
- read_cnt += _elem41.read(iprot)
- self.disk_usage_info.append(_elem41)
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- elif fid == 7:
- if ftype == TType.STRING:
- (length, self.description) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 8:
- if ftype == TType.LIST:
- self.all_core_file_list = []
- (length, _etype45, _size42) = iprot.readListBegin()
- read_cnt += length
- for _i46 in xrange(_size42):
- read_cnt += iprot.readContainerElementBegin()
- (length, _elem47) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- self.all_core_file_list.append(_elem47)
- read_cnt += iprot.readContainerElementEnd()
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('NodeStatus') < 0: return -1
- if self.name is not None:
- annotations = {}
- if self._table is None or self._table is '': return -1
- annotations['key'] = self._table
- if oprot.writeFieldBegin('name', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.deleted is not None:
- annotations = {}
- if oprot.writeFieldBegin('deleted', TType.BOOL, 2, annotations) < 0: return -1
- if oprot.writeBool(self.deleted) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.status is not None:
- annotations = {}
- if oprot.writeFieldBegin('status', TType.STRING, 3, annotations) < 0: return -1
- if oprot.writeString(self.status) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.process_status is not None:
- annotations = {}
- annotations['aggtype'] = 'union'
- if oprot.writeFieldBegin('process_status', TType.LIST, 4, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRUCT, len(self.process_status)) < 0: return -1
- for iter48 in self.process_status:
- if iter48.write(oprot) < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.process_info is not None:
- annotations = {}
- annotations['aggtype'] = 'union'
- if oprot.writeFieldBegin('process_info', TType.LIST, 5, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRUCT, len(self.process_info)) < 0: return -1
- for iter49 in self.process_info:
- if iter49.write(oprot) < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.disk_usage_info is not None:
- annotations = {}
- annotations['tags'] = ''
- if oprot.writeFieldBegin('disk_usage_info', TType.LIST, 6, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRUCT, len(self.disk_usage_info)) < 0: return -1
- for iter50 in self.disk_usage_info:
- if iter50.write(oprot) < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.description is not None:
- annotations = {}
- if oprot.writeFieldBegin('description', TType.STRING, 7, annotations) < 0: return -1
- if oprot.writeString(self.description) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.all_core_file_list is not None:
- annotations = {}
- if oprot.writeFieldBegin('all_core_file_list', TType.LIST, 8, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRING, len(self.all_core_file_list)) < 0: return -1
- for iter51 in self.all_core_file_list:
- if oprot.writeContainerElementBegin() < 0: return -1
- if oprot.writeString(iter51) < 0: return -1
- if oprot.writeContainerElementEnd() < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.name is not None:
- log_str.write('name = ')
- log_str.write(self.name)
- log_str.write(' ')
- if self.deleted is not None:
- log_str.write('deleted = ')
- if self.deleted:
- log_str.write('True')
- else:
- log_str.write('False')
- log_str.write(' ')
- if self.status is not None:
- log_str.write('status = ')
- log_str.write(self.status)
- log_str.write(' ')
- if self.process_status is not None:
- log_str.write('process_status = ')
- log_str.write('[ ')
- for iter52 in self.process_status:
- log_str.write('<< ')
- log_str.write(iter52.log())
- log_str.write('>>')
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- if self.process_info is not None:
- log_str.write('process_info = ')
- log_str.write('[ ')
- for iter53 in self.process_info:
- log_str.write('<< ')
- log_str.write(iter53.log())
- log_str.write('>>')
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- if self.disk_usage_info is not None:
- log_str.write('disk_usage_info = ')
- log_str.write('[ ')
- for iter54 in self.disk_usage_info:
- log_str.write('<< ')
- log_str.write(iter54.log())
- log_str.write('>>')
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- if self.description is not None:
- log_str.write('description = ')
- log_str.write(self.description)
- log_str.write(' ')
- if self.all_core_file_list is not None:
- log_str.write('all_core_file_list = ')
- log_str.write('[ ')
- for iter55 in self.all_core_file_list:
- log_str.write(iter55)
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class ModuleCpuStateTrace(sandesh_base.SandeshUVE):
-
- thrift_spec = (
- None, # 0
- (1, TType.STRUCT, 'data', (ModuleCpuState, ModuleCpuState.thrift_spec), None, ), # 1
- )
-
- def __init__(self, data=None, table=None, sandesh=sandesh_base.sandesh_global):
- sandesh_base.SandeshUVE.__init__(self)
- self.data = data
- self._scope = sandesh.scope()
- self._module = sandesh.module()
- self._source = sandesh.source_id()
- self._node_type = sandesh.node_type()
- self._instance_id = sandesh.instance_id()
- self._seqnum = 0
- self._timestamp = UTCTimestampUsec()
- self._versionsig = 3681498004
- self._hints = 0 | SANDESH_KEY_HINT
- if table is not None:
- self.data._table = table
-
- def update_uve(self, tdata):
- if self.data.name is not None:
- tdata.name = self.data.name
- if self.data.deleted is not None:
- tdata.deleted = self.data.deleted
- if self.data.module_cpu_info is not None:
- tdata.module_cpu_info = self.data.module_cpu_info
- if self.data.build_info is not None:
- tdata.build_info = self.data.build_info
- if self.data.config_node_ip is not None:
- tdata.config_node_ip = self.data.config_node_ip
- if self.data.api_server_cpu_share is not None:
- tdata.api_server_cpu_share = self.data.api_server_cpu_share
- if self.data.schema_xmer_cpu_share is not None:
- tdata.schema_xmer_cpu_share = self.data.schema_xmer_cpu_share
- if self.data.service_monitor_cpu_share is not None:
- tdata.service_monitor_cpu_share = self.data.service_monitor_cpu_share
- if self.data.api_server_mem_virt is not None:
- tdata.api_server_mem_virt = self.data.api_server_mem_virt
- if self.data.schema_xmer_mem_virt is not None:
- tdata.schema_xmer_mem_virt = self.data.schema_xmer_mem_virt
- if self.data.service_monitor_mem_virt is not None:
- tdata.service_monitor_mem_virt = self.data.service_monitor_mem_virt
- return tdata
-
- def log(self, trace=False):
- log_str = cStringIO.StringIO()
- if trace:
- log_str.write(str(self._timestamp))
- log_str.write(' ')
- log_str.write('ModuleCpuStateTrace: ')
- if self.data is not None:
- log_str.write('data = ')
- log_str.write('<< ')
- log_str.write(self.data.log())
- log_str.write('>>')
- log_str.write(' ')
- return log_str.getvalue()
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return -1
- read_cnt = 0
- (length, sandesh_name) = iprot.readSandeshBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRUCT:
- self.data = ModuleCpuState()
- read_cnt += self.data.read(iprot)
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readSandeshEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeSandeshBegin('ModuleCpuStateTrace') < 0: return -1
- if self.data is not None:
- annotations = {}
- if oprot.writeFieldBegin('data', TType.STRUCT, 1, annotations) < 0: return -1
- if self.data.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeSandeshEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def compare(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.data != other.data:
- return False
- return True
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class ConfigCpuStateTrace(sandesh_base.SandeshUVE):
-
- thrift_spec = (
- None, # 0
- (1, TType.STRUCT, 'data', (ConfigCpuState, ConfigCpuState.thrift_spec), None, ), # 1
- )
-
- def __init__(self, data=None, table=None, sandesh=sandesh_base.sandesh_global):
- sandesh_base.SandeshUVE.__init__(self)
- self.data = data
- self._scope = sandesh.scope()
- self._module = sandesh.module()
- self._source = sandesh.source_id()
- self._node_type = sandesh.node_type()
- self._instance_id = sandesh.instance_id()
- self._seqnum = 0
- self._timestamp = UTCTimestampUsec()
- self._versionsig = 3278957034
- self._hints = 0 | SANDESH_KEY_HINT
- if table is not None:
- self.data._table = table
-
- def update_uve(self, tdata):
- if self.data.name is not None:
- tdata.name = self.data.name
- if self.data.deleted is not None:
- tdata.deleted = self.data.deleted
- if self.data.cpu_info is not None:
- tdata.cpu_info = self.data.cpu_info
- return tdata
-
- def log(self, trace=False):
- log_str = cStringIO.StringIO()
- if trace:
- log_str.write(str(self._timestamp))
- log_str.write(' ')
- log_str.write('ConfigCpuStateTrace: ')
- if self.data is not None:
- log_str.write('data = ')
- log_str.write('<< ')
- log_str.write(self.data.log())
- log_str.write('>>')
- log_str.write(' ')
- return log_str.getvalue()
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return -1
- read_cnt = 0
- (length, sandesh_name) = iprot.readSandeshBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRUCT:
- self.data = ConfigCpuState()
- read_cnt += self.data.read(iprot)
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readSandeshEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeSandeshBegin('ConfigCpuStateTrace') < 0: return -1
- if self.data is not None:
- annotations = {}
- if oprot.writeFieldBegin('data', TType.STRUCT, 1, annotations) < 0: return -1
- if self.data.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeSandeshEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def compare(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.data != other.data:
- return False
- return True
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class NodeStatusUVE(sandesh_base.SandeshUVE):
-
- thrift_spec = (
- None, # 0
- (1, TType.STRUCT, 'data', (NodeStatus, NodeStatus.thrift_spec), None, ), # 1
- )
-
- def __init__(self, data=None, table=None, sandesh=sandesh_base.sandesh_global):
- sandesh_base.SandeshUVE.__init__(self)
- self.data = data
- self._scope = sandesh.scope()
- self._module = sandesh.module()
- self._source = sandesh.source_id()
- self._node_type = sandesh.node_type()
- self._instance_id = sandesh.instance_id()
- self._seqnum = 0
- self._timestamp = UTCTimestampUsec()
- self._versionsig = 2778367443
- self._hints = 0 | SANDESH_KEY_HINT
- if table is not None:
- self.data._table = table
-
- def update_uve(self, tdata):
- if self.data.name is not None:
- tdata.name = self.data.name
- if self.data.deleted is not None:
- tdata.deleted = self.data.deleted
- if self.data.status is not None:
- tdata.status = self.data.status
- if self.data.process_status is not None:
- tdata.process_status = self.data.process_status
- if self.data.process_info is not None:
- tdata.process_info = self.data.process_info
- if self.data.disk_usage_info is not None:
- tdata.disk_usage_info = self.data.disk_usage_info
- if self.data.description is not None:
- tdata.description = self.data.description
- if self.data.all_core_file_list is not None:
- tdata.all_core_file_list = self.data.all_core_file_list
- return tdata
-
- def log(self, trace=False):
- log_str = cStringIO.StringIO()
- if trace:
- log_str.write(str(self._timestamp))
- log_str.write(' ')
- log_str.write('NodeStatusUVE: ')
- if self.data is not None:
- log_str.write('data = ')
- log_str.write('<< ')
- log_str.write(self.data.log())
- log_str.write('>>')
- log_str.write(' ')
- return log_str.getvalue()
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return -1
- read_cnt = 0
- (length, sandesh_name) = iprot.readSandeshBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRUCT:
- self.data = NodeStatus()
- read_cnt += self.data.read(iprot)
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readSandeshEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeSandeshBegin('NodeStatusUVE') < 0: return -1
- if self.data is not None:
- annotations = {}
- if oprot.writeFieldBegin('data', TType.STRUCT, 1, annotations) < 0: return -1
- if self.data.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeSandeshEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def compare(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.data != other.data:
- return False
- return True
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-
-_SANDESH_REQUEST_LIST = [
-]
-
-
-_SANDESH_UVE_LIST = [
-'ModuleCpuStateTrace',
-'ConfigCpuStateTrace',
-'NodeStatusUVE',
-]
-
-
-_SANDESH_UVE_DATA_LIST = [
-'ModuleCpuState',
-'ConfigCpuState',
-'NodeStatus',
-]
-
-
-_SANDESH_ALARM_LIST = [
-]
-
-
-_SANDESH_ALARM_DATA_LIST = [
-]
diff --git a/Testcases/cfgm_common/uve/cfgm_cpuinfo/ttypes.pyc b/Testcases/cfgm_common/uve/cfgm_cpuinfo/ttypes.pyc
deleted file mode 100644
index 4a16093..0000000
--- a/Testcases/cfgm_common/uve/cfgm_cpuinfo/ttypes.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/physical_router/__init__.py b/Testcases/cfgm_common/uve/physical_router/__init__.py
deleted file mode 100644
index adefd8e..0000000
--- a/Testcases/cfgm_common/uve/physical_router/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__all__ = ['ttypes', 'constants']
diff --git a/Testcases/cfgm_common/uve/physical_router/__init__.pyc b/Testcases/cfgm_common/uve/physical_router/__init__.pyc
deleted file mode 100644
index 1b6bfe5..0000000
--- a/Testcases/cfgm_common/uve/physical_router/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/physical_router/constants.py b/Testcases/cfgm_common/uve/physical_router/constants.py
deleted file mode 100644
index aadd78e..0000000
--- a/Testcases/cfgm_common/uve/physical_router/constants.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-from ttypes import *
-
diff --git a/Testcases/cfgm_common/uve/physical_router/constants.pyc b/Testcases/cfgm_common/uve/physical_router/constants.pyc
deleted file mode 100644
index aee32ed..0000000
--- a/Testcases/cfgm_common/uve/physical_router/constants.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/physical_router/http_request.py b/Testcases/cfgm_common/uve/physical_router/http_request.py
deleted file mode 100644
index 8baea4f..0000000
--- a/Testcases/cfgm_common/uve/physical_router/http_request.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-import ttypes
-
-_HTTP_REQUEST_LIST = [
-]
-
diff --git a/Testcases/cfgm_common/uve/physical_router/http_request.pyc b/Testcases/cfgm_common/uve/physical_router/http_request.pyc
deleted file mode 100644
index 8f6d434..0000000
--- a/Testcases/cfgm_common/uve/physical_router/http_request.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/physical_router/index.html b/Testcases/cfgm_common/uve/physical_router/index.html
deleted file mode 100644
index 8e90634..0000000
--- a/Testcases/cfgm_common/uve/physical_router/index.html
+++ /dev/null
@@ -1,9 +0,0 @@
-<html><head>
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>All Thrift declarations</title></head><body>
-<h1>All Thrift declarations</h1>
-<table><tr><th>Module</th><th>Services</th><th>Sandeshs</th><th>Data types</th><th>Constants</th></tr>
-<tr>
-<td>physical_router</td><td></td></tr>
-</table>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/physical_router/physical_router.html b/Testcases/cfgm_common/uve/physical_router/physical_router.html
deleted file mode 100644
index 9c65e61..0000000
--- a/Testcases/cfgm_common/uve/physical_router/physical_router.html
+++ /dev/null
@@ -1,14 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml">
-<head>
-<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>Module: physical_router</title></head><body>
-<h1>Module: physical_router</h1>
-<table><tr><th>Module</th><th>Sandeshs</th></tr>
-<tr>
-<td>physical_router</td><td></td></tr>
-</table>
-<hr/><h2 id="Sandeshs"></h2>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/physical_router/physical_router.xml b/Testcases/cfgm_common/uve/physical_router/physical_router.xml
deleted file mode 100644
index e1befde..0000000
--- a/Testcases/cfgm_common/uve/physical_router/physical_router.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<?xml-stylesheet type="text/xsl" href="/universal_parse.xsl"?>
-<physical_router type="rlist">
-</physical_router>
diff --git a/Testcases/cfgm_common/uve/physical_router/request_skeleton.py b/Testcases/cfgm_common/uve/physical_router/request_skeleton.py
deleted file mode 100644
index 99c1196..0000000
--- a/Testcases/cfgm_common/uve/physical_router/request_skeleton.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-
-# This autogenerated skeleton file illustrates the implementation of
-# derived class to handle the sandesh request.
-
diff --git a/Testcases/cfgm_common/uve/physical_router/request_skeleton.pyc b/Testcases/cfgm_common/uve/physical_router/request_skeleton.pyc
deleted file mode 100644
index 70674a0..0000000
--- a/Testcases/cfgm_common/uve/physical_router/request_skeleton.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/physical_router/style.css b/Testcases/cfgm_common/uve/physical_router/style.css
deleted file mode 100644
index 6dc2f22..0000000
--- a/Testcases/cfgm_common/uve/physical_router/style.css
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Auto-generated CSS for generated Thrift docs */
-body { font-family: Tahoma, sans-serif; }
-pre { background-color: #dddddd; padding: 6px; }
-h3,h4 { padding-top: 0px; margin-top: 0px; }
-div.definition { border: 1px solid gray; margin: 10px; padding: 10px; }
-div.extends { margin: -0.5em 0 1em 5em }
-table { border: 1px solid grey; border-collapse: collapse; }
-td { border: 1px solid grey; padding: 1px 6px; vertical-align: top; }
-th { border: 1px solid black; background-color: #bbbbbb;
- text-align: left; padding: 1px 6px; }
diff --git a/Testcases/cfgm_common/uve/physical_router/ttypes.py b/Testcases/cfgm_common/uve/physical_router/ttypes.py
deleted file mode 100644
index 2f487ce..0000000
--- a/Testcases/cfgm_common/uve/physical_router/ttypes.py
+++ /dev/null
@@ -1,461 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-
-from pysandesh.transport import TTransport
-from pysandesh.protocol import TBinaryProtocol, TProtocol
-try:
- from pysandesh.protocol import fastbinary
-except:
- fastbinary = None
-
-import cStringIO
-import uuid
-import bottle
-from pysandesh import sandesh_base
-from pysandesh.sandesh_http import SandeshHttp
-from pysandesh.sandesh_uve import SandeshUVETypeMaps
-from pysandesh.util import UTCTimestampUsec, UTCTimestampUsecToString
-from pysandesh.gen_py.sandesh.constants import *
-
-
-
-class UvePhysicalRouterConfig(object):
- """
- Attributes:
- - name
- - deleted
- - ip_address
- - connected_bgp_router
- - product_info
- - auto_conf_enabled
- - netconf_enabled_status
- - last_commit_time
- - last_commit_duration
- - commit_status_message
- - total_commits_sent_since_up
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'name', None, None, ), # 1
- (2, TType.BOOL, 'deleted', None, None, ), # 2
- (3, TType.STRING, 'ip_address', None, None, ), # 3
- (4, TType.STRING, 'connected_bgp_router', None, None, ), # 4
- (5, TType.STRING, 'product_info', None, None, ), # 5
- (6, TType.BOOL, 'auto_conf_enabled', None, None, ), # 6
- (7, TType.BOOL, 'netconf_enabled_status', None, None, ), # 7
- (8, TType.STRING, 'last_commit_time', None, None, ), # 8
- (9, TType.STRING, 'last_commit_duration', None, None, ), # 9
- (10, TType.STRING, 'commit_status_message', None, None, ), # 10
- (11, TType.I32, 'total_commits_sent_since_up', None, None, ), # 11
- )
-
- def __init__(self, name=None, deleted=None, ip_address=None, connected_bgp_router=None, product_info=None, auto_conf_enabled=None, netconf_enabled_status=None, last_commit_time=None, last_commit_duration=None, commit_status_message=None, total_commits_sent_since_up=None,):
- self.name = name
- self.deleted = deleted
- self.ip_address = ip_address
- self.connected_bgp_router = connected_bgp_router
- self.product_info = product_info
- self.auto_conf_enabled = auto_conf_enabled
- self.netconf_enabled_status = netconf_enabled_status
- self.last_commit_time = last_commit_time
- self.last_commit_duration = last_commit_duration
- self.commit_status_message = commit_status_message
- self.total_commits_sent_since_up = total_commits_sent_since_up
- self._table = 'ObjectPRouter'
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.BOOL:
- (length, self.deleted) = iprot.readBool();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.STRING:
- (length, self.ip_address) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.STRING:
- (length, self.connected_bgp_router) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 5:
- if ftype == TType.STRING:
- (length, self.product_info) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 6:
- if ftype == TType.BOOL:
- (length, self.auto_conf_enabled) = iprot.readBool();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 7:
- if ftype == TType.BOOL:
- (length, self.netconf_enabled_status) = iprot.readBool();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 8:
- if ftype == TType.STRING:
- (length, self.last_commit_time) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 9:
- if ftype == TType.STRING:
- (length, self.last_commit_duration) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 10:
- if ftype == TType.STRING:
- (length, self.commit_status_message) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 11:
- if ftype == TType.I32:
- (length, self.total_commits_sent_since_up) = iprot.readI32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('UvePhysicalRouterConfig') < 0: return -1
- if self.name is not None:
- annotations = {}
- if self._table is None or self._table is '': return -1
- annotations['key'] = self._table
- if oprot.writeFieldBegin('name', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.deleted is not None:
- annotations = {}
- if oprot.writeFieldBegin('deleted', TType.BOOL, 2, annotations) < 0: return -1
- if oprot.writeBool(self.deleted) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.ip_address is not None:
- annotations = {}
- if oprot.writeFieldBegin('ip_address', TType.STRING, 3, annotations) < 0: return -1
- if oprot.writeString(self.ip_address) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.connected_bgp_router is not None:
- annotations = {}
- if oprot.writeFieldBegin('connected_bgp_router', TType.STRING, 4, annotations) < 0: return -1
- if oprot.writeString(self.connected_bgp_router) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.product_info is not None:
- annotations = {}
- if oprot.writeFieldBegin('product_info', TType.STRING, 5, annotations) < 0: return -1
- if oprot.writeString(self.product_info) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.auto_conf_enabled is not None:
- annotations = {}
- if oprot.writeFieldBegin('auto_conf_enabled', TType.BOOL, 6, annotations) < 0: return -1
- if oprot.writeBool(self.auto_conf_enabled) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.netconf_enabled_status is not None:
- annotations = {}
- if oprot.writeFieldBegin('netconf_enabled_status', TType.BOOL, 7, annotations) < 0: return -1
- if oprot.writeBool(self.netconf_enabled_status) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.last_commit_time is not None:
- annotations = {}
- if oprot.writeFieldBegin('last_commit_time', TType.STRING, 8, annotations) < 0: return -1
- if oprot.writeString(self.last_commit_time) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.last_commit_duration is not None:
- annotations = {}
- if oprot.writeFieldBegin('last_commit_duration', TType.STRING, 9, annotations) < 0: return -1
- if oprot.writeString(self.last_commit_duration) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.commit_status_message is not None:
- annotations = {}
- if oprot.writeFieldBegin('commit_status_message', TType.STRING, 10, annotations) < 0: return -1
- if oprot.writeString(self.commit_status_message) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.total_commits_sent_since_up is not None:
- annotations = {}
- if oprot.writeFieldBegin('total_commits_sent_since_up', TType.I32, 11, annotations) < 0: return -1
- if oprot.writeI32(self.total_commits_sent_since_up) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.name is not None:
- log_str.write('name = ')
- log_str.write(self.name)
- log_str.write(' ')
- if self.deleted is not None:
- log_str.write('deleted = ')
- if self.deleted:
- log_str.write('True')
- else:
- log_str.write('False')
- log_str.write(' ')
- if self.ip_address is not None:
- log_str.write('ip_address = ')
- log_str.write(self.ip_address)
- log_str.write(' ')
- if self.connected_bgp_router is not None:
- log_str.write('connected_bgp_router = ')
- log_str.write(self.connected_bgp_router)
- log_str.write(' ')
- if self.product_info is not None:
- log_str.write('product_info = ')
- log_str.write(self.product_info)
- log_str.write(' ')
- if self.auto_conf_enabled is not None:
- log_str.write('auto_conf_enabled = ')
- if self.auto_conf_enabled:
- log_str.write('True')
- else:
- log_str.write('False')
- log_str.write(' ')
- if self.netconf_enabled_status is not None:
- log_str.write('netconf_enabled_status = ')
- if self.netconf_enabled_status:
- log_str.write('True')
- else:
- log_str.write('False')
- log_str.write(' ')
- if self.last_commit_time is not None:
- log_str.write('last_commit_time = ')
- log_str.write(self.last_commit_time)
- log_str.write(' ')
- if self.last_commit_duration is not None:
- log_str.write('last_commit_duration = ')
- log_str.write(self.last_commit_duration)
- log_str.write(' ')
- if self.commit_status_message is not None:
- log_str.write('commit_status_message = ')
- log_str.write(self.commit_status_message)
- log_str.write(' ')
- if self.total_commits_sent_since_up is not None:
- log_str.write('total_commits_sent_since_up = ')
- log_str.write(str(self.total_commits_sent_since_up))
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class UvePhysicalRouterConfigTrace(sandesh_base.SandeshUVE):
-
- thrift_spec = (
- None, # 0
- (1, TType.STRUCT, 'data', (UvePhysicalRouterConfig, UvePhysicalRouterConfig.thrift_spec), None, ), # 1
- )
-
- def __init__(self, data=None, table=None, sandesh=sandesh_base.sandesh_global):
- sandesh_base.SandeshUVE.__init__(self)
- self.data = data
- self._scope = sandesh.scope()
- self._module = sandesh.module()
- self._source = sandesh.source_id()
- self._node_type = sandesh.node_type()
- self._instance_id = sandesh.instance_id()
- self._seqnum = 0
- self._timestamp = UTCTimestampUsec()
- self._versionsig = 147236693
- self._hints = 0 | SANDESH_KEY_HINT
- if table is not None:
- self.data._table = table
-
- def update_uve(self, tdata):
- if self.data.name is not None:
- tdata.name = self.data.name
- if self.data.deleted is not None:
- tdata.deleted = self.data.deleted
- if self.data.ip_address is not None:
- tdata.ip_address = self.data.ip_address
- if self.data.connected_bgp_router is not None:
- tdata.connected_bgp_router = self.data.connected_bgp_router
- if self.data.product_info is not None:
- tdata.product_info = self.data.product_info
- if self.data.auto_conf_enabled is not None:
- tdata.auto_conf_enabled = self.data.auto_conf_enabled
- if self.data.netconf_enabled_status is not None:
- tdata.netconf_enabled_status = self.data.netconf_enabled_status
- if self.data.last_commit_time is not None:
- tdata.last_commit_time = self.data.last_commit_time
- if self.data.last_commit_duration is not None:
- tdata.last_commit_duration = self.data.last_commit_duration
- if self.data.commit_status_message is not None:
- tdata.commit_status_message = self.data.commit_status_message
- if self.data.total_commits_sent_since_up is not None:
- tdata.total_commits_sent_since_up = self.data.total_commits_sent_since_up
- return tdata
-
- def log(self, trace=False):
- log_str = cStringIO.StringIO()
- if trace:
- log_str.write(str(self._timestamp))
- log_str.write(' ')
- log_str.write('UvePhysicalRouterConfigTrace: ')
- if self.data is not None:
- log_str.write('data = ')
- log_str.write('<< ')
- log_str.write(self.data.log())
- log_str.write('>>')
- log_str.write(' ')
- return log_str.getvalue()
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return -1
- read_cnt = 0
- (length, sandesh_name) = iprot.readSandeshBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRUCT:
- self.data = UvePhysicalRouterConfig()
- read_cnt += self.data.read(iprot)
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readSandeshEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeSandeshBegin('UvePhysicalRouterConfigTrace') < 0: return -1
- if self.data is not None:
- annotations = {}
- if oprot.writeFieldBegin('data', TType.STRUCT, 1, annotations) < 0: return -1
- if self.data.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeSandeshEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def compare(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.data != other.data:
- return False
- return True
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-
-_SANDESH_REQUEST_LIST = [
-]
-
-
-_SANDESH_UVE_LIST = [
-'UvePhysicalRouterConfigTrace',
-]
-
-
-_SANDESH_UVE_DATA_LIST = [
-'UvePhysicalRouterConfig',
-]
-
-
-_SANDESH_ALARM_LIST = [
-]
-
-
-_SANDESH_ALARM_DATA_LIST = [
-]
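
The generated UvePhysicalRouterConfig / UvePhysicalRouterConfigTrace pair removed above is normally driven from device-manager code roughly along the following lines. This is a minimal illustrative sketch, not part of the removed files; it assumes an already-initialised pysandesh connection (sandesh_base.sandesh_global) and the conventional SandeshUVE send() entry point.

# Illustrative sketch only: assumes pysandesh is initialised elsewhere and
# that the generated SandeshUVE classes expose the usual send() entry point.
from pysandesh import sandesh_base
from cfgm_common.uve.physical_router.ttypes import (
    UvePhysicalRouterConfig, UvePhysicalRouterConfigTrace)

def send_pr_uve(pr_name, mgmt_ip, bgp_router,
                sandesh=sandesh_base.sandesh_global):
    # Populate the UVE data struct; fields left as None are skipped on write().
    pr_uve = UvePhysicalRouterConfig(name=pr_name,
                                     ip_address=mgmt_ip,
                                     connected_bgp_router=bgp_router,
                                     netconf_enabled_status=True)
    # Wrap the data in the trace Sandesh and publish it; the 'name' field is
    # keyed against the ObjectPRouter table set in the generated constructor.
    UvePhysicalRouterConfigTrace(data=pr_uve, sandesh=sandesh).send(sandesh=sandesh)
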
diff --git a/Testcases/cfgm_common/uve/physical_router/ttypes.pyc b/Testcases/cfgm_common/uve/physical_router/ttypes.pyc
deleted file mode 100644
index 3b39e02..0000000
--- a/Testcases/cfgm_common/uve/physical_router/ttypes.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/service_instance/__init__.py b/Testcases/cfgm_common/uve/service_instance/__init__.py
deleted file mode 100644
index adefd8e..0000000
--- a/Testcases/cfgm_common/uve/service_instance/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__all__ = ['ttypes', 'constants']
diff --git a/Testcases/cfgm_common/uve/service_instance/__init__.pyc b/Testcases/cfgm_common/uve/service_instance/__init__.pyc
deleted file mode 100644
index 0841faa..0000000
--- a/Testcases/cfgm_common/uve/service_instance/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/service_instance/constants.py b/Testcases/cfgm_common/uve/service_instance/constants.py
deleted file mode 100644
index aadd78e..0000000
--- a/Testcases/cfgm_common/uve/service_instance/constants.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-from ttypes import *
-
diff --git a/Testcases/cfgm_common/uve/service_instance/constants.pyc b/Testcases/cfgm_common/uve/service_instance/constants.pyc
deleted file mode 100644
index e8ee618..0000000
--- a/Testcases/cfgm_common/uve/service_instance/constants.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/service_instance/http_request.py b/Testcases/cfgm_common/uve/service_instance/http_request.py
deleted file mode 100644
index 8baea4f..0000000
--- a/Testcases/cfgm_common/uve/service_instance/http_request.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-import ttypes
-
-_HTTP_REQUEST_LIST = [
-]
-
diff --git a/Testcases/cfgm_common/uve/service_instance/http_request.pyc b/Testcases/cfgm_common/uve/service_instance/http_request.pyc
deleted file mode 100644
index c14433a..0000000
--- a/Testcases/cfgm_common/uve/service_instance/http_request.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/service_instance/index.html b/Testcases/cfgm_common/uve/service_instance/index.html
deleted file mode 100644
index 6f164e3..0000000
--- a/Testcases/cfgm_common/uve/service_instance/index.html
+++ /dev/null
@@ -1,9 +0,0 @@
-<html><head>
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>All Thrift declarations</title></head><body>
-<h1>All Thrift declarations</h1>
-<table><tr><th>Module</th><th>Services</th><th>Sandeshs</th><th>Data types</th><th>Constants</th></tr>
-<tr>
-<td>service_instance</td><td></td></tr>
-</table>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/service_instance/request_skeleton.py b/Testcases/cfgm_common/uve/service_instance/request_skeleton.py
deleted file mode 100644
index 99c1196..0000000
--- a/Testcases/cfgm_common/uve/service_instance/request_skeleton.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-
-# This autogenerated skeleton file illustrates the implementation of
-# derived class to handle the sandesh request.
-
diff --git a/Testcases/cfgm_common/uve/service_instance/request_skeleton.pyc b/Testcases/cfgm_common/uve/service_instance/request_skeleton.pyc
deleted file mode 100644
index 96f839a..0000000
--- a/Testcases/cfgm_common/uve/service_instance/request_skeleton.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/service_instance/service_instance.html b/Testcases/cfgm_common/uve/service_instance/service_instance.html
deleted file mode 100644
index 75ca67b..0000000
--- a/Testcases/cfgm_common/uve/service_instance/service_instance.html
+++ /dev/null
@@ -1,14 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml">
-<head>
-<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>Module: service_instance</title></head><body>
-<h1>Module: service_instance</h1>
-<table><tr><th>Module</th><th>Sandeshs</th></tr>
-<tr>
-<td>service_instance</td><td></td></tr>
-</table>
-<hr/><h2 id="Sandeshs"></h2>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/service_instance/service_instance.xml b/Testcases/cfgm_common/uve/service_instance/service_instance.xml
deleted file mode 100644
index 4ccfff9..0000000
--- a/Testcases/cfgm_common/uve/service_instance/service_instance.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<?xml-stylesheet type="text/xsl" href="/universal_parse.xsl"?>
-<service_instance type="rlist">
-</service_instance>
diff --git a/Testcases/cfgm_common/uve/service_instance/style.css b/Testcases/cfgm_common/uve/service_instance/style.css
deleted file mode 100644
index 6dc2f22..0000000
--- a/Testcases/cfgm_common/uve/service_instance/style.css
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Auto-generated CSS for generated Thrift docs */
-body { font-family: Tahoma, sans-serif; }
-pre { background-color: #dddddd; padding: 6px; }
-h3,h4 { padding-top: 0px; margin-top: 0px; }
-div.definition { border: 1px solid gray; margin: 10px; padding: 10px; }
-div.extends { margin: -0.5em 0 1em 5em }
-table { border: 1px solid grey; border-collapse: collapse; }
-td { border: 1px solid grey; padding: 1px 6px; vertical-align: top; }
-th { border: 1px solid black; background-color: #bbbbbb;
- text-align: left; padding: 1px 6px; }
diff --git a/Testcases/cfgm_common/uve/service_instance/ttypes.py b/Testcases/cfgm_common/uve/service_instance/ttypes.py
deleted file mode 100644
index c8cb3a4..0000000
--- a/Testcases/cfgm_common/uve/service_instance/ttypes.py
+++ /dev/null
@@ -1,484 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-
-from pysandesh.transport import TTransport
-from pysandesh.protocol import TBinaryProtocol, TProtocol
-try:
- from pysandesh.protocol import fastbinary
-except:
- fastbinary = None
-
-import cStringIO
-import uuid
-import bottle
-from pysandesh import sandesh_base
-from pysandesh.sandesh_http import SandeshHttp
-from pysandesh.sandesh_uve import SandeshUVETypeMaps
-from pysandesh.util import UTCTimestampUsec, UTCTimestampUsecToString
-from pysandesh.gen_py.sandesh.constants import *
-
-
-
-class UveSvcInstanceVMConfig(object):
- """
- Attributes:
- - uuid
- - vr_name
- - ha
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'uuid', None, None, ), # 1
- (2, TType.STRING, 'vr_name', None, None, ), # 2
- (3, TType.STRING, 'ha', None, None, ), # 3
- )
-
- def __init__(self, uuid=None, vr_name=None, ha=None,):
- self.uuid = uuid
- self.vr_name = vr_name
- self.ha = ha
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.uuid) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.STRING:
- (length, self.vr_name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.STRING:
- (length, self.ha) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('UveSvcInstanceVMConfig') < 0: return -1
- if self.uuid is not None:
- annotations = {}
- if oprot.writeFieldBegin('uuid', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.uuid) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.vr_name is not None:
- annotations = {}
- if oprot.writeFieldBegin('vr_name', TType.STRING, 2, annotations) < 0: return -1
- if oprot.writeString(self.vr_name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.ha is not None:
- annotations = {}
- if oprot.writeFieldBegin('ha', TType.STRING, 3, annotations) < 0: return -1
- if oprot.writeString(self.ha) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.uuid is not None:
- log_str.write('uuid = ')
- log_str.write(self.uuid)
- log_str.write(' ')
- if self.vr_name is not None:
- log_str.write('vr_name = ')
- log_str.write(self.vr_name)
- log_str.write(' ')
- if self.ha is not None:
- log_str.write('ha = ')
- log_str.write(self.ha)
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class UveSvcInstanceConfig(object):
- """
- Attributes:
- - name
- - deleted
- - st_name
- - status
- - create_ts
- - vm_list
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'name', None, None, ), # 1
- (2, TType.BOOL, 'deleted', None, None, ), # 2
- (3, TType.STRING, 'st_name', None, None, ), # 3
- (4, TType.STRING, 'status', None, None, ), # 4
- (5, TType.I64, 'create_ts', None, None, ), # 5
- (6, TType.LIST, 'vm_list', (TType.STRUCT,(UveSvcInstanceVMConfig, UveSvcInstanceVMConfig.thrift_spec)), None, ), # 6
- )
-
- def __init__(self, name=None, deleted=None, st_name=None, status=None, create_ts=None, vm_list=None,):
- self.name = name
- self.deleted = deleted
- self.st_name = st_name
- self.status = status
- self.create_ts = create_ts
- self.vm_list = vm_list
- self._table = 'ObjectSITable'
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.BOOL:
- (length, self.deleted) = iprot.readBool();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.STRING:
- (length, self.st_name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.STRING:
- (length, self.status) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 5:
- if ftype == TType.I64:
- (length, self.create_ts) = iprot.readI64();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 6:
- if ftype == TType.LIST:
- self.vm_list = []
- (length, _etype3, _size0) = iprot.readListBegin()
- read_cnt += length
- for _i4 in xrange(_size0):
- _elem5 = UveSvcInstanceVMConfig()
- read_cnt += _elem5.read(iprot)
- self.vm_list.append(_elem5)
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('UveSvcInstanceConfig') < 0: return -1
- if self.name is not None:
- annotations = {}
- if self._table is None or self._table is '': return -1
- annotations['key'] = self._table
- if oprot.writeFieldBegin('name', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.deleted is not None:
- annotations = {}
- if oprot.writeFieldBegin('deleted', TType.BOOL, 2, annotations) < 0: return -1
- if oprot.writeBool(self.deleted) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.st_name is not None:
- annotations = {}
- if oprot.writeFieldBegin('st_name', TType.STRING, 3, annotations) < 0: return -1
- if oprot.writeString(self.st_name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.status is not None:
- annotations = {}
- if oprot.writeFieldBegin('status', TType.STRING, 4, annotations) < 0: return -1
- if oprot.writeString(self.status) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.create_ts is not None:
- annotations = {}
- if oprot.writeFieldBegin('create_ts', TType.I64, 5, annotations) < 0: return -1
- if oprot.writeI64(self.create_ts) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.vm_list is not None:
- annotations = {}
- if oprot.writeFieldBegin('vm_list', TType.LIST, 6, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRUCT, len(self.vm_list)) < 0: return -1
- for iter6 in self.vm_list:
- if iter6.write(oprot) < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.name is not None:
- log_str.write('name = ')
- log_str.write(self.name)
- log_str.write(' ')
- if self.deleted is not None:
- log_str.write('deleted = ')
- if self.deleted:
- log_str.write('True')
- else:
- log_str.write('False')
- log_str.write(' ')
- if self.st_name is not None:
- log_str.write('st_name = ')
- log_str.write(self.st_name)
- log_str.write(' ')
- if self.status is not None:
- log_str.write('status = ')
- log_str.write(self.status)
- log_str.write(' ')
- if self.create_ts is not None:
- log_str.write('create_ts = ')
- log_str.write(str(self.create_ts))
- log_str.write(' ')
- if self.vm_list is not None:
- log_str.write('vm_list = ')
- log_str.write('[ ')
- for iter7 in self.vm_list:
- log_str.write('<< ')
- log_str.write(iter7.log())
- log_str.write('>>')
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class UveSvcInstanceConfigTrace(sandesh_base.SandeshUVE):
-
- thrift_spec = (
- None, # 0
- (1, TType.STRUCT, 'data', (UveSvcInstanceConfig, UveSvcInstanceConfig.thrift_spec), None, ), # 1
- )
-
- def __init__(self, data=None, table=None, sandesh=sandesh_base.sandesh_global):
- sandesh_base.SandeshUVE.__init__(self)
- self.data = data
- self._scope = sandesh.scope()
- self._module = sandesh.module()
- self._source = sandesh.source_id()
- self._node_type = sandesh.node_type()
- self._instance_id = sandesh.instance_id()
- self._seqnum = 0
- self._timestamp = UTCTimestampUsec()
- self._versionsig = 1572544528
- self._hints = 0 | SANDESH_KEY_HINT
- if table is not None:
- self.data._table = table
-
- def update_uve(self, tdata):
- if self.data.name is not None:
- tdata.name = self.data.name
- if self.data.deleted is not None:
- tdata.deleted = self.data.deleted
- if self.data.st_name is not None:
- tdata.st_name = self.data.st_name
- if self.data.status is not None:
- tdata.status = self.data.status
- if self.data.create_ts is not None:
- tdata.create_ts = self.data.create_ts
- if self.data.vm_list is not None:
- tdata.vm_list = self.data.vm_list
- return tdata
-
- def log(self, trace=False):
- log_str = cStringIO.StringIO()
- if trace:
- log_str.write(str(self._timestamp))
- log_str.write(' ')
- log_str.write('UveSvcInstanceConfigTrace: ')
- if self.data is not None:
- log_str.write('data = ')
- log_str.write('<< ')
- log_str.write(self.data.log())
- log_str.write('>>')
- log_str.write(' ')
- return log_str.getvalue()
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return -1
- read_cnt = 0
- (length, sandesh_name) = iprot.readSandeshBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRUCT:
- self.data = UveSvcInstanceConfig()
- read_cnt += self.data.read(iprot)
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readSandeshEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeSandeshBegin('UveSvcInstanceConfigTrace') < 0: return -1
- if self.data is not None:
- annotations = {}
- if oprot.writeFieldBegin('data', TType.STRUCT, 1, annotations) < 0: return -1
- if self.data.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeSandeshEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def compare(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.data != other.data:
- return False
- return True
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-
-_SANDESH_REQUEST_LIST = [
-]
-
-
-_SANDESH_UVE_LIST = [
-'UveSvcInstanceConfigTrace',
-]
-
-
-_SANDESH_UVE_DATA_LIST = [
-'UveSvcInstanceConfig',
-]
-
-
-_SANDESH_ALARM_LIST = [
-]
-
-
-_SANDESH_ALARM_DATA_LIST = [
-]
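
Unlike the flat UVE above, the removed UveSvcInstanceConfig carries a nested list of UveSvcInstanceVMConfig structs (vm_list). The sketch below shows how that list is typically built and published, under the same assumptions as the earlier sketch (initialised pysandesh, standard SandeshUVE send()); the helper name and tuple layout are illustrative.

# Illustrative sketch only: demonstrates the nested list-of-structs field.
from pysandesh import sandesh_base
from cfgm_common.uve.service_instance.ttypes import (
    UveSvcInstanceVMConfig, UveSvcInstanceConfig, UveSvcInstanceConfigTrace)

def send_si_uve(si_name, st_name, vm_infos,
                sandesh=sandesh_base.sandesh_global):
    # vm_infos: iterable of (uuid, vrouter_name, ha_mode) tuples (assumed shape).
    vm_list = [UveSvcInstanceVMConfig(uuid=u, vr_name=vr, ha=ha)
               for (u, vr, ha) in vm_infos]
    si_uve = UveSvcInstanceConfig(name=si_name, st_name=st_name,
                                  status='CREATE', vm_list=vm_list)
    # Publish against the ObjectSITable key set in the generated constructor.
    UveSvcInstanceConfigTrace(data=si_uve, sandesh=sandesh).send(sandesh=sandesh)
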
diff --git a/Testcases/cfgm_common/uve/service_instance/ttypes.pyc b/Testcases/cfgm_common/uve/service_instance/ttypes.pyc
deleted file mode 100644
index d0f3ce4..0000000
--- a/Testcases/cfgm_common/uve/service_instance/ttypes.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/virtual_machine/__init__.py b/Testcases/cfgm_common/uve/virtual_machine/__init__.py
deleted file mode 100644
index adefd8e..0000000
--- a/Testcases/cfgm_common/uve/virtual_machine/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__all__ = ['ttypes', 'constants']
diff --git a/Testcases/cfgm_common/uve/virtual_machine/__init__.pyc b/Testcases/cfgm_common/uve/virtual_machine/__init__.pyc
deleted file mode 100644
index 9037b16..0000000
--- a/Testcases/cfgm_common/uve/virtual_machine/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/virtual_machine/constants.py b/Testcases/cfgm_common/uve/virtual_machine/constants.py
deleted file mode 100644
index aadd78e..0000000
--- a/Testcases/cfgm_common/uve/virtual_machine/constants.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-from ttypes import *
-
diff --git a/Testcases/cfgm_common/uve/virtual_machine/constants.pyc b/Testcases/cfgm_common/uve/virtual_machine/constants.pyc
deleted file mode 100644
index 5b0397a..0000000
--- a/Testcases/cfgm_common/uve/virtual_machine/constants.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/virtual_machine/http_request.py b/Testcases/cfgm_common/uve/virtual_machine/http_request.py
deleted file mode 100644
index 8baea4f..0000000
--- a/Testcases/cfgm_common/uve/virtual_machine/http_request.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-import ttypes
-
-_HTTP_REQUEST_LIST = [
-]
-
diff --git a/Testcases/cfgm_common/uve/virtual_machine/http_request.pyc b/Testcases/cfgm_common/uve/virtual_machine/http_request.pyc
deleted file mode 100644
index ba5b9e6..0000000
--- a/Testcases/cfgm_common/uve/virtual_machine/http_request.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/virtual_machine/index.html b/Testcases/cfgm_common/uve/virtual_machine/index.html
deleted file mode 100644
index 656351f..0000000
--- a/Testcases/cfgm_common/uve/virtual_machine/index.html
+++ /dev/null
@@ -1,9 +0,0 @@
-<html><head>
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>All Thrift declarations</title></head><body>
-<h1>All Thrift declarations</h1>
-<table><tr><th>Module</th><th>Services</th><th>Sandeshs</th><th>Data types</th><th>Constants</th></tr>
-<tr>
-<td>virtual_machine</td><td></td></tr>
-</table>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/virtual_machine/request_skeleton.py b/Testcases/cfgm_common/uve/virtual_machine/request_skeleton.py
deleted file mode 100644
index 99c1196..0000000
--- a/Testcases/cfgm_common/uve/virtual_machine/request_skeleton.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-
-# This autogenerated skeleton file illustrates the implementation of
-# derived class to handle the sandesh request.
-
diff --git a/Testcases/cfgm_common/uve/virtual_machine/request_skeleton.pyc b/Testcases/cfgm_common/uve/virtual_machine/request_skeleton.pyc
deleted file mode 100644
index 7a0ffff..0000000
--- a/Testcases/cfgm_common/uve/virtual_machine/request_skeleton.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/virtual_machine/style.css b/Testcases/cfgm_common/uve/virtual_machine/style.css
deleted file mode 100644
index 6dc2f22..0000000
--- a/Testcases/cfgm_common/uve/virtual_machine/style.css
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Auto-generated CSS for generated Thrift docs */
-body { font-family: Tahoma, sans-serif; }
-pre { background-color: #dddddd; padding: 6px; }
-h3,h4 { padding-top: 0px; margin-top: 0px; }
-div.definition { border: 1px solid gray; margin: 10px; padding: 10px; }
-div.extends { margin: -0.5em 0 1em 5em }
-table { border: 1px solid grey; border-collapse: collapse; }
-td { border: 1px solid grey; padding: 1px 6px; vertical-align: top; }
-th { border: 1px solid black; background-color: #bbbbbb;
- text-align: left; padding: 1px 6px; }
diff --git a/Testcases/cfgm_common/uve/virtual_machine/ttypes.py b/Testcases/cfgm_common/uve/virtual_machine/ttypes.py
deleted file mode 100644
index d2a95f0..0000000
--- a/Testcases/cfgm_common/uve/virtual_machine/ttypes.py
+++ /dev/null
@@ -1,517 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-
-from pysandesh.transport import TTransport
-from pysandesh.protocol import TBinaryProtocol, TProtocol
-try:
- from pysandesh.protocol import fastbinary
-except:
- fastbinary = None
-
-import cStringIO
-import uuid
-import bottle
-from pysandesh import sandesh_base
-from pysandesh.sandesh_http import SandeshHttp
-from pysandesh.sandesh_uve import SandeshUVETypeMaps
-from pysandesh.util import UTCTimestampUsec, UTCTimestampUsecToString
-from pysandesh.gen_py.sandesh.constants import *
-
-
-
-class VmInterfaceConfig(object):
- """
- Attributes:
- - name
- - ip_address
- - virtual_network
- - floating_ips
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'name', None, None, ), # 1
- (2, TType.STRING, 'ip_address', None, None, ), # 2
- (3, TType.STRING, 'virtual_network', None, None, ), # 3
- (4, TType.LIST, 'floating_ips', (TType.STRING,None), None, ), # 4
- )
-
- def __init__(self, name=None, ip_address=None, virtual_network=None, floating_ips=None,):
- self.name = name
- self.ip_address = ip_address
- self.virtual_network = virtual_network
- self.floating_ips = floating_ips
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.STRING:
- (length, self.ip_address) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.STRING:
- (length, self.virtual_network) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.LIST:
- self.floating_ips = []
- (length, _etype3, _size0) = iprot.readListBegin()
- read_cnt += length
- for _i4 in xrange(_size0):
- read_cnt += iprot.readContainerElementBegin()
- (length, _elem5) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- self.floating_ips.append(_elem5)
- read_cnt += iprot.readContainerElementEnd()
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('VmInterfaceConfig') < 0: return -1
- if self.name is not None:
- annotations = {}
- if oprot.writeFieldBegin('name', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.ip_address is not None:
- annotations = {}
- if oprot.writeFieldBegin('ip_address', TType.STRING, 2, annotations) < 0: return -1
- if oprot.writeString(self.ip_address) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.virtual_network is not None:
- annotations = {}
- annotations['aggtype'] = 'listkey'
- if oprot.writeFieldBegin('virtual_network', TType.STRING, 3, annotations) < 0: return -1
- if oprot.writeString(self.virtual_network) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.floating_ips is not None:
- annotations = {}
- if oprot.writeFieldBegin('floating_ips', TType.LIST, 4, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRING, len(self.floating_ips)) < 0: return -1
- for iter6 in self.floating_ips:
- if oprot.writeContainerElementBegin() < 0: return -1
- if oprot.writeString(iter6) < 0: return -1
- if oprot.writeContainerElementEnd() < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.name is not None:
- log_str.write('name = ')
- log_str.write(self.name)
- log_str.write(' ')
- if self.ip_address is not None:
- log_str.write('ip_address = ')
- log_str.write(self.ip_address)
- log_str.write(' ')
- if self.virtual_network is not None:
- log_str.write('virtual_network = ')
- log_str.write(self.virtual_network)
- log_str.write(' ')
- if self.floating_ips is not None:
- log_str.write('floating_ips = ')
- log_str.write('[ ')
- for iter7 in self.floating_ips:
- log_str.write(iter7)
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class UveVirtualMachineConfig(object):
- """
- Attributes:
- - name
- - deleted
- - attached_groups
- - interface_list
- - vrouter
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'name', None, None, ), # 1
- (2, TType.BOOL, 'deleted', None, None, ), # 2
- (3, TType.LIST, 'attached_groups', (TType.STRING,None), None, ), # 3
- (4, TType.LIST, 'interface_list', (TType.STRUCT,(VmInterfaceConfig, VmInterfaceConfig.thrift_spec)), None, ), # 4
- (5, TType.STRING, 'vrouter', None, None, ), # 5
- )
-
- def __init__(self, name=None, deleted=None, attached_groups=None, interface_list=None, vrouter=None,):
- self.name = name
- self.deleted = deleted
- self.attached_groups = attached_groups
- self.interface_list = interface_list
- self.vrouter = vrouter
- self._table = 'ObjectVMTable'
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.BOOL:
- (length, self.deleted) = iprot.readBool();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.LIST:
- self.attached_groups = []
- (length, _etype11, _size8) = iprot.readListBegin()
- read_cnt += length
- for _i12 in xrange(_size8):
- read_cnt += iprot.readContainerElementBegin()
- (length, _elem13) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- self.attached_groups.append(_elem13)
- read_cnt += iprot.readContainerElementEnd()
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.LIST:
- self.interface_list = []
- (length, _etype17, _size14) = iprot.readListBegin()
- read_cnt += length
- for _i18 in xrange(_size14):
- _elem19 = VmInterfaceConfig()
- read_cnt += _elem19.read(iprot)
- self.interface_list.append(_elem19)
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- elif fid == 5:
- if ftype == TType.STRING:
- (length, self.vrouter) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('UveVirtualMachineConfig') < 0: return -1
- if self.name is not None:
- annotations = {}
- if self._table is None or self._table is '': return -1
- annotations['key'] = self._table
- if oprot.writeFieldBegin('name', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.deleted is not None:
- annotations = {}
- if oprot.writeFieldBegin('deleted', TType.BOOL, 2, annotations) < 0: return -1
- if oprot.writeBool(self.deleted) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.attached_groups is not None:
- annotations = {}
- if oprot.writeFieldBegin('attached_groups', TType.LIST, 3, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRING, len(self.attached_groups)) < 0: return -1
- for iter20 in self.attached_groups:
- if oprot.writeContainerElementBegin() < 0: return -1
- if oprot.writeString(iter20) < 0: return -1
- if oprot.writeContainerElementEnd() < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.interface_list is not None:
- annotations = {}
- if oprot.writeFieldBegin('interface_list', TType.LIST, 4, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRUCT, len(self.interface_list)) < 0: return -1
- for iter21 in self.interface_list:
- if iter21.write(oprot) < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.vrouter is not None:
- annotations = {}
- if oprot.writeFieldBegin('vrouter', TType.STRING, 5, annotations) < 0: return -1
- if oprot.writeString(self.vrouter) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.name is not None:
- log_str.write('name = ')
- log_str.write(self.name)
- log_str.write(' ')
- if self.deleted is not None:
- log_str.write('deleted = ')
- if self.deleted:
- log_str.write('True')
- else:
- log_str.write('False')
- log_str.write(' ')
- if self.attached_groups is not None:
- log_str.write('attached_groups = ')
- log_str.write('[ ')
- for iter22 in self.attached_groups:
- log_str.write(iter22)
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- if self.interface_list is not None:
- log_str.write('interface_list = ')
- log_str.write('[ ')
- for iter23 in self.interface_list:
- log_str.write('<< ')
- log_str.write(iter23.log())
- log_str.write('>>')
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- if self.vrouter is not None:
- log_str.write('vrouter = ')
- log_str.write(self.vrouter)
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class UveVirtualMachineConfigTrace(sandesh_base.SandeshUVE):
-
- thrift_spec = (
- None, # 0
- (1, TType.STRUCT, 'data', (UveVirtualMachineConfig, UveVirtualMachineConfig.thrift_spec), None, ), # 1
- )
-
- def __init__(self, data=None, table=None, sandesh=sandesh_base.sandesh_global):
- sandesh_base.SandeshUVE.__init__(self)
- self.data = data
- self._scope = sandesh.scope()
- self._module = sandesh.module()
- self._source = sandesh.source_id()
- self._node_type = sandesh.node_type()
- self._instance_id = sandesh.instance_id()
- self._seqnum = 0
- self._timestamp = UTCTimestampUsec()
- self._versionsig = 4221818251
- self._hints = 0 | SANDESH_KEY_HINT
- if table is not None:
- self.data._table = table
-
- def update_uve(self, tdata):
- if self.data.name is not None:
- tdata.name = self.data.name
- if self.data.deleted is not None:
- tdata.deleted = self.data.deleted
- if self.data.attached_groups is not None:
- tdata.attached_groups = self.data.attached_groups
- if self.data.interface_list is not None:
- tdata.interface_list = self.data.interface_list
- if self.data.vrouter is not None:
- tdata.vrouter = self.data.vrouter
- return tdata
-
- def log(self, trace=False):
- log_str = cStringIO.StringIO()
- if trace:
- log_str.write(str(self._timestamp))
- log_str.write(' ')
- log_str.write('UveVirtualMachineConfigTrace: ')
- if self.data is not None:
- log_str.write('data = ')
- log_str.write('<< ')
- log_str.write(self.data.log())
- log_str.write('>>')
- log_str.write(' ')
- return log_str.getvalue()
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return -1
- read_cnt = 0
- (length, sandesh_name) = iprot.readSandeshBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRUCT:
- self.data = UveVirtualMachineConfig()
- read_cnt += self.data.read(iprot)
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readSandeshEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeSandeshBegin('UveVirtualMachineConfigTrace') < 0: return -1
- if self.data is not None:
- annotations = {}
- if oprot.writeFieldBegin('data', TType.STRUCT, 1, annotations) < 0: return -1
- if self.data.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeSandeshEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def compare(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.data != other.data:
- return False
- return True
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-
-_SANDESH_REQUEST_LIST = [
-]
-
-
-_SANDESH_UVE_LIST = [
-'UveVirtualMachineConfigTrace',
-]
-
-
-_SANDESH_UVE_DATA_LIST = [
-'UveVirtualMachineConfig',
-]
-
-
-_SANDESH_ALARM_LIST = [
-]
-
-
-_SANDESH_ALARM_DATA_LIST = [
-]
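Note: the struct and UVE classes deleted above follow the usual Sandesh pattern — a plain data object keyed into an analytics table ('ObjectVMTable') plus a SandeshUVE wrapper that carries it. A minimal usage sketch, not part of the removed file; the import path and the send() call are assumptions based on that pattern:

# Hypothetical sketch of how the removed virtual_machine UVE classes were used.
# The module path and send() (from the SandeshUVE base) are assumptions.
from cfgm_common.uve.virtual_machine.ttypes import (
    UveVirtualMachineConfig, UveVirtualMachineConfigTrace)

# Build the UVE payload; 'name' is the key into ObjectVMTable.
vm_uve = UveVirtualMachineConfig(
    name='default-domain:demo:vm-1',
    deleted=False,
    attached_groups=['default-sg'],
    interface_list=[],            # would hold VmInterfaceConfig entries
    vrouter='compute-node-1')

# Wrap the payload in its trace sandesh and emit it.
UveVirtualMachineConfigTrace(data=vm_uve).send()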
diff --git a/Testcases/cfgm_common/uve/virtual_machine/ttypes.pyc b/Testcases/cfgm_common/uve/virtual_machine/ttypes.pyc
deleted file mode 100644
index 44085c2..0000000
--- a/Testcases/cfgm_common/uve/virtual_machine/ttypes.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/virtual_machine/virtual_machine.html b/Testcases/cfgm_common/uve/virtual_machine/virtual_machine.html
deleted file mode 100644
index ab3f29d..0000000
--- a/Testcases/cfgm_common/uve/virtual_machine/virtual_machine.html
+++ /dev/null
@@ -1,14 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml">
-<head>
-<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>Module: virtual_machine</title></head><body>
-<h1>Module: virtual_machine</h1>
-<table><tr><th>Module</th><th>Sandeshs</th></tr>
-<tr>
-<td>virtual_machine</td><td></td></tr>
-</table>
-<hr/><h2 id="Sandeshs"></h2>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/virtual_machine/virtual_machine.xml b/Testcases/cfgm_common/uve/virtual_machine/virtual_machine.xml
deleted file mode 100644
index 72fb587..0000000
--- a/Testcases/cfgm_common/uve/virtual_machine/virtual_machine.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<?xml-stylesheet type="text/xsl" href="/universal_parse.xsl"?>
-<virtual_machine type="rlist">
-</virtual_machine>
diff --git a/Testcases/cfgm_common/uve/virtual_network/__init__.py b/Testcases/cfgm_common/uve/virtual_network/__init__.py
deleted file mode 100644
index adefd8e..0000000
--- a/Testcases/cfgm_common/uve/virtual_network/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__all__ = ['ttypes', 'constants']
diff --git a/Testcases/cfgm_common/uve/virtual_network/__init__.pyc b/Testcases/cfgm_common/uve/virtual_network/__init__.pyc
deleted file mode 100644
index 5e20ef0..0000000
--- a/Testcases/cfgm_common/uve/virtual_network/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/virtual_network/constants.py b/Testcases/cfgm_common/uve/virtual_network/constants.py
deleted file mode 100644
index aadd78e..0000000
--- a/Testcases/cfgm_common/uve/virtual_network/constants.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-from ttypes import *
-
diff --git a/Testcases/cfgm_common/uve/virtual_network/constants.pyc b/Testcases/cfgm_common/uve/virtual_network/constants.pyc
deleted file mode 100644
index 13aede5..0000000
--- a/Testcases/cfgm_common/uve/virtual_network/constants.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/virtual_network/http_request.py b/Testcases/cfgm_common/uve/virtual_network/http_request.py
deleted file mode 100644
index 8baea4f..0000000
--- a/Testcases/cfgm_common/uve/virtual_network/http_request.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-import ttypes
-
-_HTTP_REQUEST_LIST = [
-]
-
diff --git a/Testcases/cfgm_common/uve/virtual_network/http_request.pyc b/Testcases/cfgm_common/uve/virtual_network/http_request.pyc
deleted file mode 100644
index cebf344..0000000
--- a/Testcases/cfgm_common/uve/virtual_network/http_request.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/virtual_network/index.html b/Testcases/cfgm_common/uve/virtual_network/index.html
deleted file mode 100644
index 0884ed5..0000000
--- a/Testcases/cfgm_common/uve/virtual_network/index.html
+++ /dev/null
@@ -1,9 +0,0 @@
-<html><head>
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>All Thrift declarations</title></head><body>
-<h1>All Thrift declarations</h1>
-<table><tr><th>Module</th><th>Services</th><th>Sandeshs</th><th>Data types</th><th>Constants</th></tr>
-<tr>
-<td>virtual_network</td><td></td></tr>
-</table>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/virtual_network/request_skeleton.py b/Testcases/cfgm_common/uve/virtual_network/request_skeleton.py
deleted file mode 100644
index 99c1196..0000000
--- a/Testcases/cfgm_common/uve/virtual_network/request_skeleton.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-
-# This autogenerated skeleton file illustrates the implementation of
-# derived class to handle the sandesh request.
-
diff --git a/Testcases/cfgm_common/uve/virtual_network/request_skeleton.pyc b/Testcases/cfgm_common/uve/virtual_network/request_skeleton.pyc
deleted file mode 100644
index 438188b..0000000
--- a/Testcases/cfgm_common/uve/virtual_network/request_skeleton.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/virtual_network/style.css b/Testcases/cfgm_common/uve/virtual_network/style.css
deleted file mode 100644
index 6dc2f22..0000000
--- a/Testcases/cfgm_common/uve/virtual_network/style.css
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Auto-generated CSS for generated Thrift docs */
-body { font-family: Tahoma, sans-serif; }
-pre { background-color: #dddddd; padding: 6px; }
-h3,h4 { padding-top: 0px; margin-top: 0px; }
-div.definition { border: 1px solid gray; margin: 10px; padding: 10px; }
-div.extends { margin: -0.5em 0 1em 5em }
-table { border: 1px solid grey; border-collapse: collapse; }
-td { border: 1px solid grey; padding: 1px 6px; vertical-align: top; }
-th { border: 1px solid black; background-color: #bbbbbb;
- text-align: left; padding: 1px 6px; }
diff --git a/Testcases/cfgm_common/uve/virtual_network/ttypes.py b/Testcases/cfgm_common/uve/virtual_network/ttypes.py
deleted file mode 100644
index b58ac6e..0000000
--- a/Testcases/cfgm_common/uve/virtual_network/ttypes.py
+++ /dev/null
@@ -1,787 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-
-from pysandesh.transport import TTransport
-from pysandesh.protocol import TBinaryProtocol, TProtocol
-try:
- from pysandesh.protocol import fastbinary
-except:
- fastbinary = None
-
-import cStringIO
-import uuid
-import bottle
-from pysandesh import sandesh_base
-from pysandesh.sandesh_http import SandeshHttp
-from pysandesh.sandesh_uve import SandeshUVETypeMaps
-from pysandesh.util import UTCTimestampUsec, UTCTimestampUsecToString
-from pysandesh.gen_py.sandesh.constants import *
-
-
-
-class UveVirtualNetworkConfig(object):
- """
- Attributes:
- - name
- - deleted
- - connected_networks
- - partially_connected_networks
- - routing_instance_list
- - total_acl_rules
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'name', None, None, ), # 1
- (2, TType.BOOL, 'deleted', None, None, ), # 2
- None, # 3
- (4, TType.LIST, 'connected_networks', (TType.STRING,None), None, ), # 4
- (5, TType.LIST, 'partially_connected_networks', (TType.STRING,None), None, ), # 5
- (6, TType.LIST, 'routing_instance_list', (TType.STRING,None), None, ), # 6
- (7, TType.I32, 'total_acl_rules', None, None, ), # 7
- )
-
- def __init__(self, name=None, deleted=None, connected_networks=None, partially_connected_networks=None, routing_instance_list=None, total_acl_rules=None,):
- self.name = name
- self.deleted = deleted
- self.connected_networks = connected_networks
- self.partially_connected_networks = partially_connected_networks
- self.routing_instance_list = routing_instance_list
- self.total_acl_rules = total_acl_rules
- self._table = 'ObjectVNTable'
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.BOOL:
- (length, self.deleted) = iprot.readBool();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.LIST:
- self.connected_networks = []
- (length, _etype3, _size0) = iprot.readListBegin()
- read_cnt += length
- for _i4 in xrange(_size0):
- read_cnt += iprot.readContainerElementBegin()
- (length, _elem5) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- self.connected_networks.append(_elem5)
- read_cnt += iprot.readContainerElementEnd()
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- elif fid == 5:
- if ftype == TType.LIST:
- self.partially_connected_networks = []
- (length, _etype9, _size6) = iprot.readListBegin()
- read_cnt += length
- for _i10 in xrange(_size6):
- read_cnt += iprot.readContainerElementBegin()
- (length, _elem11) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- self.partially_connected_networks.append(_elem11)
- read_cnt += iprot.readContainerElementEnd()
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- elif fid == 6:
- if ftype == TType.LIST:
- self.routing_instance_list = []
- (length, _etype15, _size12) = iprot.readListBegin()
- read_cnt += length
- for _i16 in xrange(_size12):
- read_cnt += iprot.readContainerElementBegin()
- (length, _elem17) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- self.routing_instance_list.append(_elem17)
- read_cnt += iprot.readContainerElementEnd()
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- elif fid == 7:
- if ftype == TType.I32:
- (length, self.total_acl_rules) = iprot.readI32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('UveVirtualNetworkConfig') < 0: return -1
- if self.name is not None:
- annotations = {}
- if self._table is None or self._table is '': return -1
- annotations['key'] = self._table
- if oprot.writeFieldBegin('name', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.deleted is not None:
- annotations = {}
- if oprot.writeFieldBegin('deleted', TType.BOOL, 2, annotations) < 0: return -1
- if oprot.writeBool(self.deleted) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.connected_networks is not None:
- annotations = {}
- annotations['aggtype'] = 'union'
- if oprot.writeFieldBegin('connected_networks', TType.LIST, 4, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRING, len(self.connected_networks)) < 0: return -1
- for iter18 in self.connected_networks:
- if oprot.writeContainerElementBegin() < 0: return -1
- if oprot.writeString(iter18) < 0: return -1
- if oprot.writeContainerElementEnd() < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.partially_connected_networks is not None:
- annotations = {}
- annotations['aggtype'] = 'union'
- if oprot.writeFieldBegin('partially_connected_networks', TType.LIST, 5, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRING, len(self.partially_connected_networks)) < 0: return -1
- for iter19 in self.partially_connected_networks:
- if oprot.writeContainerElementBegin() < 0: return -1
- if oprot.writeString(iter19) < 0: return -1
- if oprot.writeContainerElementEnd() < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.routing_instance_list is not None:
- annotations = {}
- annotations['aggtype'] = 'union'
- if oprot.writeFieldBegin('routing_instance_list', TType.LIST, 6, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRING, len(self.routing_instance_list)) < 0: return -1
- for iter20 in self.routing_instance_list:
- if oprot.writeContainerElementBegin() < 0: return -1
- if oprot.writeString(iter20) < 0: return -1
- if oprot.writeContainerElementEnd() < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.total_acl_rules is not None:
- annotations = {}
- if oprot.writeFieldBegin('total_acl_rules', TType.I32, 7, annotations) < 0: return -1
- if oprot.writeI32(self.total_acl_rules) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.name is not None:
- log_str.write('name = ')
- log_str.write(self.name)
- log_str.write(' ')
- if self.deleted is not None:
- log_str.write('deleted = ')
- if self.deleted:
- log_str.write('True')
- else:
- log_str.write('False')
- log_str.write(' ')
- if self.connected_networks is not None:
- log_str.write('connected_networks = ')
- log_str.write('[ ')
- for iter21 in self.connected_networks:
- log_str.write(iter21)
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- if self.partially_connected_networks is not None:
- log_str.write('partially_connected_networks = ')
- log_str.write('[ ')
- for iter22 in self.partially_connected_networks:
- log_str.write(iter22)
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- if self.routing_instance_list is not None:
- log_str.write('routing_instance_list = ')
- log_str.write('[ ')
- for iter23 in self.routing_instance_list:
- log_str.write(iter23)
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- if self.total_acl_rules is not None:
- log_str.write('total_acl_rules = ')
- log_str.write(str(self.total_acl_rules))
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class UveServiceChainData(object):
- """
- Attributes:
- - name
- - deleted
- - source_virtual_network
- - destination_virtual_network
- - source_ports
- - destination_ports
- - protocol
- - direction
- - services
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'name', None, None, ), # 1
- (2, TType.BOOL, 'deleted', None, None, ), # 2
- (3, TType.STRING, 'source_virtual_network', None, None, ), # 3
- (4, TType.STRING, 'destination_virtual_network', None, None, ), # 4
- (5, TType.STRING, 'source_ports', None, None, ), # 5
- (6, TType.STRING, 'destination_ports', None, None, ), # 6
- (7, TType.STRING, 'protocol', None, None, ), # 7
- (8, TType.STRING, 'direction', None, None, ), # 8
- (9, TType.LIST, 'services', (TType.STRING,None), None, ), # 9
- )
-
- def __init__(self, name=None, deleted=None, source_virtual_network=None, destination_virtual_network=None, source_ports=None, destination_ports=None, protocol=None, direction=None, services=None,):
- self.name = name
- self.deleted = deleted
- self.source_virtual_network = source_virtual_network
- self.destination_virtual_network = destination_virtual_network
- self.source_ports = source_ports
- self.destination_ports = destination_ports
- self.protocol = protocol
- self.direction = direction
- self.services = services
- self._table = 'ServiceChain'
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.BOOL:
- (length, self.deleted) = iprot.readBool();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.STRING:
- (length, self.source_virtual_network) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.STRING:
- (length, self.destination_virtual_network) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 5:
- if ftype == TType.STRING:
- (length, self.source_ports) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 6:
- if ftype == TType.STRING:
- (length, self.destination_ports) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 7:
- if ftype == TType.STRING:
- (length, self.protocol) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 8:
- if ftype == TType.STRING:
- (length, self.direction) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 9:
- if ftype == TType.LIST:
- self.services = []
- (length, _etype27, _size24) = iprot.readListBegin()
- read_cnt += length
- for _i28 in xrange(_size24):
- read_cnt += iprot.readContainerElementBegin()
- (length, _elem29) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- self.services.append(_elem29)
- read_cnt += iprot.readContainerElementEnd()
- read_cnt += iprot.readListEnd()
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('UveServiceChainData') < 0: return -1
- if self.name is not None:
- annotations = {}
- if self._table is None or self._table is '': return -1
- annotations['key'] = self._table
- if oprot.writeFieldBegin('name', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.deleted is not None:
- annotations = {}
- if oprot.writeFieldBegin('deleted', TType.BOOL, 2, annotations) < 0: return -1
- if oprot.writeBool(self.deleted) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.source_virtual_network is not None:
- annotations = {}
- if oprot.writeFieldBegin('source_virtual_network', TType.STRING, 3, annotations) < 0: return -1
- if oprot.writeString(self.source_virtual_network) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.destination_virtual_network is not None:
- annotations = {}
- if oprot.writeFieldBegin('destination_virtual_network', TType.STRING, 4, annotations) < 0: return -1
- if oprot.writeString(self.destination_virtual_network) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.source_ports is not None:
- annotations = {}
- if oprot.writeFieldBegin('source_ports', TType.STRING, 5, annotations) < 0: return -1
- if oprot.writeString(self.source_ports) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.destination_ports is not None:
- annotations = {}
- if oprot.writeFieldBegin('destination_ports', TType.STRING, 6, annotations) < 0: return -1
- if oprot.writeString(self.destination_ports) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.protocol is not None:
- annotations = {}
- if oprot.writeFieldBegin('protocol', TType.STRING, 7, annotations) < 0: return -1
- if oprot.writeString(self.protocol) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.direction is not None:
- annotations = {}
- if oprot.writeFieldBegin('direction', TType.STRING, 8, annotations) < 0: return -1
- if oprot.writeString(self.direction) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.services is not None:
- annotations = {}
- if oprot.writeFieldBegin('services', TType.LIST, 9, annotations) < 0: return -1
- if oprot.writeListBegin(TType.STRING, len(self.services)) < 0: return -1
- for iter30 in self.services:
- if oprot.writeContainerElementBegin() < 0: return -1
- if oprot.writeString(iter30) < 0: return -1
- if oprot.writeContainerElementEnd() < 0: return -1
- if oprot.writeListEnd() < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.name is not None:
- log_str.write('name = ')
- log_str.write(self.name)
- log_str.write(' ')
- if self.deleted is not None:
- log_str.write('deleted = ')
- if self.deleted:
- log_str.write('True')
- else:
- log_str.write('False')
- log_str.write(' ')
- if self.source_virtual_network is not None:
- log_str.write('source_virtual_network = ')
- log_str.write(self.source_virtual_network)
- log_str.write(' ')
- if self.destination_virtual_network is not None:
- log_str.write('destination_virtual_network = ')
- log_str.write(self.destination_virtual_network)
- log_str.write(' ')
- if self.source_ports is not None:
- log_str.write('source_ports = ')
- log_str.write(self.source_ports)
- log_str.write(' ')
- if self.destination_ports is not None:
- log_str.write('destination_ports = ')
- log_str.write(self.destination_ports)
- log_str.write(' ')
- if self.protocol is not None:
- log_str.write('protocol = ')
- log_str.write(self.protocol)
- log_str.write(' ')
- if self.direction is not None:
- log_str.write('direction = ')
- log_str.write(self.direction)
- log_str.write(' ')
- if self.services is not None:
- log_str.write('services = ')
- log_str.write('[ ')
- for iter31 in self.services:
- log_str.write(iter31)
- log_str.write(', ')
- log_str.write(' ]')
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class UveVirtualNetworkConfigTrace(sandesh_base.SandeshUVE):
-
- thrift_spec = (
- None, # 0
- (1, TType.STRUCT, 'data', (UveVirtualNetworkConfig, UveVirtualNetworkConfig.thrift_spec), None, ), # 1
- )
-
- def __init__(self, data=None, table=None, sandesh=sandesh_base.sandesh_global):
- sandesh_base.SandeshUVE.__init__(self)
- self.data = data
- self._scope = sandesh.scope()
- self._module = sandesh.module()
- self._source = sandesh.source_id()
- self._node_type = sandesh.node_type()
- self._instance_id = sandesh.instance_id()
- self._seqnum = 0
- self._timestamp = UTCTimestampUsec()
- self._versionsig = 74672981
- self._hints = 0 | SANDESH_KEY_HINT
- if table is not None:
- self.data._table = table
-
- def update_uve(self, tdata):
- if self.data.name is not None:
- tdata.name = self.data.name
- if self.data.deleted is not None:
- tdata.deleted = self.data.deleted
- if self.data.connected_networks is not None:
- tdata.connected_networks = self.data.connected_networks
- if self.data.partially_connected_networks is not None:
- tdata.partially_connected_networks = self.data.partially_connected_networks
- if self.data.routing_instance_list is not None:
- tdata.routing_instance_list = self.data.routing_instance_list
- if self.data.total_acl_rules is not None:
- tdata.total_acl_rules = self.data.total_acl_rules
- return tdata
-
- def log(self, trace=False):
- log_str = cStringIO.StringIO()
- if trace:
- log_str.write(str(self._timestamp))
- log_str.write(' ')
- log_str.write('UveVirtualNetworkConfigTrace: ')
- if self.data is not None:
- log_str.write('data = ')
- log_str.write('<< ')
- log_str.write(self.data.log())
- log_str.write('>>')
- log_str.write(' ')
- return log_str.getvalue()
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return -1
- read_cnt = 0
- (length, sandesh_name) = iprot.readSandeshBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRUCT:
- self.data = UveVirtualNetworkConfig()
- read_cnt += self.data.read(iprot)
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readSandeshEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeSandeshBegin('UveVirtualNetworkConfigTrace') < 0: return -1
- if self.data is not None:
- annotations = {}
- if oprot.writeFieldBegin('data', TType.STRUCT, 1, annotations) < 0: return -1
- if self.data.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeSandeshEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def compare(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.data != other.data:
- return False
- return True
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class UveServiceChain(sandesh_base.SandeshUVE):
-
- thrift_spec = (
- None, # 0
- (1, TType.STRUCT, 'data', (UveServiceChainData, UveServiceChainData.thrift_spec), None, ), # 1
- )
-
- def __init__(self, data=None, table=None, sandesh=sandesh_base.sandesh_global):
- sandesh_base.SandeshUVE.__init__(self)
- self.data = data
- self._scope = sandesh.scope()
- self._module = sandesh.module()
- self._source = sandesh.source_id()
- self._node_type = sandesh.node_type()
- self._instance_id = sandesh.instance_id()
- self._seqnum = 0
- self._timestamp = UTCTimestampUsec()
- self._versionsig = 2360766836
- self._hints = 0 | SANDESH_KEY_HINT
- if table is not None:
- self.data._table = table
-
- def update_uve(self, tdata):
- if self.data.name is not None:
- tdata.name = self.data.name
- if self.data.deleted is not None:
- tdata.deleted = self.data.deleted
- if self.data.source_virtual_network is not None:
- tdata.source_virtual_network = self.data.source_virtual_network
- if self.data.destination_virtual_network is not None:
- tdata.destination_virtual_network = self.data.destination_virtual_network
- if self.data.source_ports is not None:
- tdata.source_ports = self.data.source_ports
- if self.data.destination_ports is not None:
- tdata.destination_ports = self.data.destination_ports
- if self.data.protocol is not None:
- tdata.protocol = self.data.protocol
- if self.data.direction is not None:
- tdata.direction = self.data.direction
- if self.data.services is not None:
- tdata.services = self.data.services
- return tdata
-
- def log(self, trace=False):
- log_str = cStringIO.StringIO()
- if trace:
- log_str.write(str(self._timestamp))
- log_str.write(' ')
- log_str.write('UveServiceChain: ')
- if self.data is not None:
- log_str.write('data = ')
- log_str.write('<< ')
- log_str.write(self.data.log())
- log_str.write('>>')
- log_str.write(' ')
- return log_str.getvalue()
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return -1
- read_cnt = 0
- (length, sandesh_name) = iprot.readSandeshBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRUCT:
- self.data = UveServiceChainData()
- read_cnt += self.data.read(iprot)
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readSandeshEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeSandeshBegin('UveServiceChain') < 0: return -1
- if self.data is not None:
- annotations = {}
- if oprot.writeFieldBegin('data', TType.STRUCT, 1, annotations) < 0: return -1
- if self.data.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeSandeshEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def compare(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.data != other.data:
- return False
- return True
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-
-_SANDESH_REQUEST_LIST = [
-]
-
-
-_SANDESH_UVE_LIST = [
-'UveVirtualNetworkConfigTrace',
-'UveServiceChain',
-]
-
-
-_SANDESH_UVE_DATA_LIST = [
-'UveVirtualNetworkConfig',
-'UveServiceChainData',
-]
-
-
-_SANDESH_ALARM_LIST = [
-]
-
-
-_SANDESH_ALARM_DATA_LIST = [
-]
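Note: the virtual_network module deleted above works the same way, with UveVirtualNetworkConfigTrace and UveServiceChain wrapping UveVirtualNetworkConfig and UveServiceChainData respectively. A hedged sketch, again assuming the generated import path and a send() method on the SandeshUVE base (neither is shown in the removed file):

# Hypothetical sketch of how the removed virtual_network UVE classes were used.
from cfgm_common.uve.virtual_network.ttypes import (
    UveVirtualNetworkConfig, UveVirtualNetworkConfigTrace)

# Build the UVE payload; 'name' is the key into ObjectVNTable.
vn_uve = UveVirtualNetworkConfig(
    name='default-domain:demo:vn-1',
    deleted=False,
    connected_networks=['default-domain:demo:vn-2'],
    partially_connected_networks=[],
    routing_instance_list=['default-domain:demo:vn-1:vn-1'],
    total_acl_rules=4)

# Wrap the payload in its trace sandesh and emit it.
UveVirtualNetworkConfigTrace(data=vn_uve).send()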
diff --git a/Testcases/cfgm_common/uve/virtual_network/ttypes.pyc b/Testcases/cfgm_common/uve/virtual_network/ttypes.pyc
deleted file mode 100644
index 2888df8..0000000
--- a/Testcases/cfgm_common/uve/virtual_network/ttypes.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/virtual_network/virtual_network.html b/Testcases/cfgm_common/uve/virtual_network/virtual_network.html
deleted file mode 100644
index 270901f..0000000
--- a/Testcases/cfgm_common/uve/virtual_network/virtual_network.html
+++ /dev/null
@@ -1,14 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml">
-<head>
-<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>Module: virtual_network</title></head><body>
-<h1>Module: virtual_network</h1>
-<table><tr><th>Module</th><th>Sandeshs</th></tr>
-<tr>
-<td>virtual_network</td><td></td></tr>
-</table>
-<hr/><h2 id="Sandeshs"></h2>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/virtual_network/virtual_network.xml b/Testcases/cfgm_common/uve/virtual_network/virtual_network.xml
deleted file mode 100644
index 7089d62..0000000
--- a/Testcases/cfgm_common/uve/virtual_network/virtual_network.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<?xml-stylesheet type="text/xsl" href="/universal_parse.xsl"?>
-<virtual_network type="rlist">
-</virtual_network>
diff --git a/Testcases/cfgm_common/uve/vnc_api/__init__.py b/Testcases/cfgm_common/uve/vnc_api/__init__.py
deleted file mode 100644
index adefd8e..0000000
--- a/Testcases/cfgm_common/uve/vnc_api/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__all__ = ['ttypes', 'constants']
diff --git a/Testcases/cfgm_common/uve/vnc_api/__init__.pyc b/Testcases/cfgm_common/uve/vnc_api/__init__.pyc
deleted file mode 100644
index 0ac091c..0000000
--- a/Testcases/cfgm_common/uve/vnc_api/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/vnc_api/constants.py b/Testcases/cfgm_common/uve/vnc_api/constants.py
deleted file mode 100644
index aadd78e..0000000
--- a/Testcases/cfgm_common/uve/vnc_api/constants.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-from ttypes import *
-
diff --git a/Testcases/cfgm_common/uve/vnc_api/constants.pyc b/Testcases/cfgm_common/uve/vnc_api/constants.pyc
deleted file mode 100644
index efe1f9b..0000000
--- a/Testcases/cfgm_common/uve/vnc_api/constants.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/vnc_api/http_request.py b/Testcases/cfgm_common/uve/vnc_api/http_request.py
deleted file mode 100644
index 8baea4f..0000000
--- a/Testcases/cfgm_common/uve/vnc_api/http_request.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-import ttypes
-
-_HTTP_REQUEST_LIST = [
-]
-
diff --git a/Testcases/cfgm_common/uve/vnc_api/http_request.pyc b/Testcases/cfgm_common/uve/vnc_api/http_request.pyc
deleted file mode 100644
index 2f6880e..0000000
--- a/Testcases/cfgm_common/uve/vnc_api/http_request.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/vnc_api/index.html b/Testcases/cfgm_common/uve/vnc_api/index.html
deleted file mode 100644
index bb522d0..0000000
--- a/Testcases/cfgm_common/uve/vnc_api/index.html
+++ /dev/null
@@ -1,9 +0,0 @@
-<html><head>
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>All Thrift declarations</title></head><body>
-<h1>All Thrift declarations</h1>
-<table><tr><th>Module</th><th>Services</th><th>Sandeshs</th><th>Data types</th><th>Constants</th></tr>
-<tr>
-<td>vnc_api</td><td></td></tr>
-</table>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/vnc_api/request_skeleton.py b/Testcases/cfgm_common/uve/vnc_api/request_skeleton.py
deleted file mode 100644
index 99c1196..0000000
--- a/Testcases/cfgm_common/uve/vnc_api/request_skeleton.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-
-# This autogenerated skeleton file illustrates the implementation of
-# derived class to handle the sandesh request.
-
diff --git a/Testcases/cfgm_common/uve/vnc_api/request_skeleton.pyc b/Testcases/cfgm_common/uve/vnc_api/request_skeleton.pyc
deleted file mode 100644
index 0e855d5..0000000
--- a/Testcases/cfgm_common/uve/vnc_api/request_skeleton.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/vnc_api/style.css b/Testcases/cfgm_common/uve/vnc_api/style.css
deleted file mode 100644
index 6dc2f22..0000000
--- a/Testcases/cfgm_common/uve/vnc_api/style.css
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Auto-generated CSS for generated Thrift docs */
-body { font-family: Tahoma, sans-serif; }
-pre { background-color: #dddddd; padding: 6px; }
-h3,h4 { padding-top: 0px; margin-top: 0px; }
-div.definition { border: 1px solid gray; margin: 10px; padding: 10px; }
-div.extends { margin: -0.5em 0 1em 5em }
-table { border: 1px solid grey; border-collapse: collapse; }
-td { border: 1px solid grey; padding: 1px 6px; vertical-align: top; }
-th { border: 1px solid black; background-color: #bbbbbb;
- text-align: left; padding: 1px 6px; }
diff --git a/Testcases/cfgm_common/uve/vnc_api/ttypes.py b/Testcases/cfgm_common/uve/vnc_api/ttypes.py
deleted file mode 100644
index 4a38dd2..0000000
--- a/Testcases/cfgm_common/uve/vnc_api/ttypes.py
+++ /dev/null
@@ -1,778 +0,0 @@
-#
-# Autogenerated by Sandesh Compiler (1.0)
-#
-# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-#
-# options string: py:new_style
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-from pysandesh.Thrift import TType, TMessageType, TException
-
-from pysandesh.transport import TTransport
-from pysandesh.protocol import TBinaryProtocol, TProtocol
-try:
- from pysandesh.protocol import fastbinary
-except:
- fastbinary = None
-
-import cStringIO
-import uuid
-import bottle
-from pysandesh import sandesh_base
-from pysandesh.sandesh_http import SandeshHttp
-from pysandesh.sandesh_uve import SandeshUVETypeMaps
-from pysandesh.util import UTCTimestampUsec, UTCTimestampUsecToString
-from pysandesh.gen_py.sandesh.constants import *
-
-
-
-class VncApiCommon(object):
- """
- Attributes:
- - identifier_uuid
- - object_type
- - identifier_name
- - url
- - operation
- - useragent
- - remote_ip
- - params
- - body
- - domain
- - project
- - user
- - error
- """
-
- thrift_spec = (
- None, # 0
- (1, TType.STRING, 'identifier_uuid', None, None, ), # 1
- (2, TType.STRING, 'object_type', None, None, ), # 2
- (3, TType.STRING, 'identifier_name', None, None, ), # 3
- (4, TType.STRING, 'url', None, None, ), # 4
- (5, TType.STRING, 'operation', None, None, ), # 5
- (6, TType.STRING, 'useragent', None, None, ), # 6
- (7, TType.STRING, 'remote_ip', None, None, ), # 7
- (8, TType.STRING, 'params', None, None, ), # 8
- (9, TType.STRING, 'body', None, None, ), # 9
- (10, TType.STRING, 'domain', None, None, ), # 10
- (11, TType.STRING, 'project', None, None, ), # 11
- (12, TType.STRING, 'user', None, None, ), # 12
- (13, TType.STRING, 'error', None, None, ), # 13
- )
-
- def __init__(self, identifier_uuid=None, object_type=None, identifier_name=None, url=None, operation=None, useragent=None, remote_ip=None, params=None, body=None, domain=None, project=None, user=None, error=None,):
- self.identifier_uuid = identifier_uuid
- self.object_type = object_type
- self.identifier_name = identifier_name
- self.url = url
- self.operation = operation
- self.useragent = useragent
- self.remote_ip = remote_ip
- self.params = params
- self.body = body
- self.domain = domain
- self.project = project
- self.user = user
- self.error = error
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return 0
- read_cnt = 0
- length = iprot.readStructBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.identifier_uuid) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.STRING:
- (length, self.object_type) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.STRING:
- (length, self.identifier_name) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 4:
- if ftype == TType.STRING:
- (length, self.url) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 5:
- if ftype == TType.STRING:
- (length, self.operation) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 6:
- if ftype == TType.STRING:
- (length, self.useragent) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 7:
- if ftype == TType.STRING:
- (length, self.remote_ip) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 8:
- if ftype == TType.STRING:
- (length, self.params) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 9:
- if ftype == TType.STRING:
- (length, self.body) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 10:
- if ftype == TType.STRING:
- (length, self.domain) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 11:
- if ftype == TType.STRING:
- (length, self.project) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 12:
- if ftype == TType.STRING:
- (length, self.user) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == 13:
- if ftype == TType.STRING:
- (length, self.error) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readStructEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeStructBegin('VncApiCommon') < 0: return -1
- if self.identifier_uuid is not None:
- annotations = {}
- if oprot.writeFieldBegin('identifier_uuid', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.identifier_uuid) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.object_type is not None:
- annotations = {}
- annotations['key'] = 'ConfigObjectTable'
- if oprot.writeFieldBegin('object_type', TType.STRING, 2, annotations) < 0: return -1
- if oprot.writeString(self.object_type) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.identifier_name is not None:
- annotations = {}
- annotations['key'] = 'ConfigObjectTable'
- if oprot.writeFieldBegin('identifier_name', TType.STRING, 3, annotations) < 0: return -1
- if oprot.writeString(self.identifier_name) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.url is not None:
- annotations = {}
- if oprot.writeFieldBegin('url', TType.STRING, 4, annotations) < 0: return -1
- if oprot.writeString(self.url) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.operation is not None:
- annotations = {}
- if oprot.writeFieldBegin('operation', TType.STRING, 5, annotations) < 0: return -1
- if oprot.writeString(self.operation) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.useragent is not None:
- annotations = {}
- if oprot.writeFieldBegin('useragent', TType.STRING, 6, annotations) < 0: return -1
- if oprot.writeString(self.useragent) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.remote_ip is not None:
- annotations = {}
- if oprot.writeFieldBegin('remote_ip', TType.STRING, 7, annotations) < 0: return -1
- if oprot.writeString(self.remote_ip) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.params is not None:
- annotations = {}
- if oprot.writeFieldBegin('params', TType.STRING, 8, annotations) < 0: return -1
- if oprot.writeString(self.params) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.body is not None:
- annotations = {}
- if oprot.writeFieldBegin('body', TType.STRING, 9, annotations) < 0: return -1
- if oprot.writeString(self.body) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.domain is not None:
- annotations = {}
- annotations['key'] = 'ConfigObjectTableByUser'
- if oprot.writeFieldBegin('domain', TType.STRING, 10, annotations) < 0: return -1
- if oprot.writeString(self.domain) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.project is not None:
- annotations = {}
- annotations['key'] = 'ConfigObjectTableByUser'
- if oprot.writeFieldBegin('project', TType.STRING, 11, annotations) < 0: return -1
- if oprot.writeString(self.project) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.user is not None:
- annotations = {}
- annotations['key'] = 'ConfigObjectTableByUser'
- if oprot.writeFieldBegin('user', TType.STRING, 12, annotations) < 0: return -1
- if oprot.writeString(self.user) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.error is not None:
- annotations = {}
- if oprot.writeFieldBegin('error', TType.STRING, 13, annotations) < 0: return -1
- if oprot.writeString(self.error) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeStructEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def log(self):
- log_str = cStringIO.StringIO()
- if self.identifier_uuid is not None:
- log_str.write('identifier_uuid = ')
- log_str.write(self.identifier_uuid)
- log_str.write(' ')
- if self.object_type is not None:
- log_str.write('object_type = ')
- log_str.write(self.object_type)
- log_str.write(' ')
- if self.identifier_name is not None:
- log_str.write('identifier_name = ')
- log_str.write(self.identifier_name)
- log_str.write(' ')
- if self.url is not None:
- log_str.write('url = ')
- log_str.write(self.url)
- log_str.write(' ')
- if self.operation is not None:
- log_str.write('operation = ')
- log_str.write(self.operation)
- log_str.write(' ')
- if self.useragent is not None:
- log_str.write('useragent = ')
- log_str.write(self.useragent)
- log_str.write(' ')
- if self.remote_ip is not None:
- log_str.write('remote_ip = ')
- log_str.write(self.remote_ip)
- log_str.write(' ')
- if self.params is not None:
- log_str.write('params = ')
- log_str.write(self.params)
- log_str.write(' ')
- if self.body is not None:
- log_str.write('body = ')
- log_str.write(self.body)
- log_str.write(' ')
- if self.domain is not None:
- log_str.write('domain = ')
- log_str.write(self.domain)
- log_str.write(' ')
- if self.project is not None:
- log_str.write('project = ')
- log_str.write(self.project)
- log_str.write(' ')
- if self.user is not None:
- log_str.write('user = ')
- log_str.write(self.user)
- log_str.write(' ')
- if self.error is not None:
- log_str.write('error = ')
- log_str.write(self.error)
- log_str.write(' ')
- return log_str.getvalue()
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class VncApiReadLog(sandesh_base.SandeshTrace):
-
- thrift_spec = None
-
- def __init__(self, api_log=None, file=None, line=None, sandesh=sandesh_base.sandesh_global):
- sandesh_base.SandeshTrace.__init__(self, type=SandeshType.TRACE_OBJECT)
- self.api_log = api_log
- self.file = file
- self.line = line
- self._scope = sandesh.scope()
- self._module = sandesh.module()
- self._source = sandesh.source_id()
- self._node_type = sandesh.node_type()
- self._instance_id = sandesh.instance_id()
- self._seqnum = 0
- self._timestamp = UTCTimestampUsec()
- self._versionsig = 103727244
- self._hints = 0 | SANDESH_KEY_HINT
-
- def log(self, trace=False):
- log_str = cStringIO.StringIO()
- if trace:
- log_str.write(str(self._timestamp))
- log_str.write(' ')
- log_str.write('VncApiReadLog: ')
- if self.api_log is not None:
- log_str.write('api_log = ')
- log_str.write('<< ')
- log_str.write(self.api_log.log())
- log_str.write('>>')
- log_str.write(' ')
- if self.file is not None:
- log_str.write('file = ')
- log_str.write(self.file)
- log_str.write(' ')
- if self.line is not None:
- log_str.write('line = ')
- log_str.write(str(self.line))
- log_str.write(' ')
- return log_str.getvalue()
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return -1
- read_cnt = 0
- (length, sandesh_name) = iprot.readSandeshBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRUCT:
- self.api_log = VncApiCommon()
- read_cnt += self.api_log.read(iprot)
- else:
- iprot.skip(ftype)
- elif fid == -32768:
- if ftype == TType.STRING:
- (length, self.file) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == -32767:
- if ftype == TType.I32:
- (length, self.line) = iprot.readI32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readSandeshEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeSandeshBegin('VncApiReadLog') < 0: return -1
- if self.file is not None:
- annotations = {}
- if oprot.writeFieldBegin('file', TType.STRING, -32768, annotations) < 0: return -1
- if oprot.writeString(self.file) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.line is not None:
- annotations = {}
- if oprot.writeFieldBegin('line', TType.I32, -32767, annotations) < 0: return -1
- if oprot.writeI32(self.line) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.api_log is not None:
- annotations = {}
- if oprot.writeFieldBegin('api_log', TType.STRUCT, 1, annotations) < 0: return -1
- if self.api_log.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeSandeshEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def compare(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.file != other.file:
- return False
- if self.line != other.line:
- return False
- if self.api_log != other.api_log:
- return False
- return True
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class VncApiConfigLog(sandesh_base.SandeshObject):
-
- thrift_spec = None
-
- def __init__(self, api_log=None, file=None, line=None, category='__default__', level=SandeshLevel.SYS_INFO, sandesh=sandesh_base.sandesh_global):
- sandesh_base.SandeshObject.__init__(self)
- self.api_log = api_log
- self.file = file
- self.line = line
- self._scope = sandesh.scope()
- self._module = sandesh.module()
- self._source = sandesh.source_id()
- self._node_type = sandesh.node_type()
- self._instance_id = sandesh.instance_id()
- self._seqnum = 0
- self._timestamp = UTCTimestampUsec()
- self._versionsig = 103727244
- self._hints = 0 | SANDESH_KEY_HINT
- self._category = category
- self._level = level
-
- def log(self, trace=False):
- log_str = cStringIO.StringIO()
- if trace:
- log_str.write(str(self._timestamp))
- log_str.write(' ')
- if self._category is not None:
- log_str.write(self._category)
- log_str.write(' [')
- log_str.write(SandeshLevel._VALUES_TO_NAMES[self._level])
- log_str.write(']: ')
- log_str.write('VncApiConfigLog: ')
- if self.api_log is not None:
- log_str.write('api_log = ')
- log_str.write('<< ')
- log_str.write(self.api_log.log())
- log_str.write('>>')
- log_str.write(' ')
- if self.file is not None:
- log_str.write('file = ')
- log_str.write(self.file)
- log_str.write(' ')
- if self.line is not None:
- log_str.write('line = ')
- log_str.write(str(self.line))
- log_str.write(' ')
- return log_str.getvalue()
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return -1
- read_cnt = 0
- (length, sandesh_name) = iprot.readSandeshBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRUCT:
- self.api_log = VncApiCommon()
- read_cnt += self.api_log.read(iprot)
- else:
- iprot.skip(ftype)
- elif fid == -32768:
- if ftype == TType.STRING:
- (length, self.file) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == -32767:
- if ftype == TType.I32:
- (length, self.line) = iprot.readI32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readSandeshEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeSandeshBegin('VncApiConfigLog') < 0: return -1
- if self.file is not None:
- annotations = {}
- if oprot.writeFieldBegin('file', TType.STRING, -32768, annotations) < 0: return -1
- if oprot.writeString(self.file) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.line is not None:
- annotations = {}
- if oprot.writeFieldBegin('line', TType.I32, -32767, annotations) < 0: return -1
- if oprot.writeI32(self.line) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.api_log is not None:
- annotations = {}
- if oprot.writeFieldBegin('api_log', TType.STRUCT, 1, annotations) < 0: return -1
- if self.api_log.write(oprot) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeSandeshEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def compare(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.file != other.file:
- return False
- if self.line != other.line:
- return False
- if self.api_log != other.api_log:
- return False
- return True
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-class VncApiError(sandesh_base.SandeshSystem):
-
- thrift_spec = None
-
- def __init__(self, api_error_msg=None, file=None, line=None, category='__default__', level=SandeshLevel.SYS_INFO, sandesh=sandesh_base.sandesh_global):
- sandesh_base.SandeshSystem.__init__(self)
- self.api_error_msg = api_error_msg
- self.file = file
- self.line = line
- self._scope = sandesh.scope()
- self._module = sandesh.module()
- self._source = sandesh.source_id()
- self._node_type = sandesh.node_type()
- self._instance_id = sandesh.instance_id()
- self._seqnum = 0
- self._timestamp = UTCTimestampUsec()
- self._versionsig = 2183673535
- self._hints = 0
- self._category = category
- self._level = level
-
- def log(self, trace=False):
- log_str = cStringIO.StringIO()
- if trace:
- log_str.write(str(self._timestamp))
- log_str.write(' ')
- if self._category is not None:
- log_str.write(self._category)
- log_str.write(' [')
- log_str.write(SandeshLevel._VALUES_TO_NAMES[self._level])
- log_str.write(']: ')
- log_str.write('VncApiError: ')
- if self.api_error_msg is not None:
- log_str.write(self.api_error_msg)
- log_str.write(' ')
- if self.file is not None:
- log_str.write(self.file)
- log_str.write(' ')
- if self.line is not None:
- log_str.write(str(self.line))
- log_str.write(' ')
- return log_str.getvalue()
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return -1
- read_cnt = 0
- (length, sandesh_name) = iprot.readSandeshBegin()
- if length < 0: return -1
- read_cnt += length
- while True:
- (length, fname, ftype, fid) = iprot.readFieldBegin()
- if length < 0: return -1
- read_cnt += length
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- (length, self.api_error_msg) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == -32768:
- if ftype == TType.STRING:
- (length, self.file) = iprot.readString();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- elif fid == -32767:
- if ftype == TType.I32:
- (length, self.line) = iprot.readI32();
- if length < 0: return -1
- read_cnt += length
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- length = iprot.readFieldEnd()
- if length < 0: return -1
- read_cnt += length
- length = iprot.readSandeshEnd()
- if length < 0: return -1
- read_cnt += length
- return read_cnt
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return 0
- if oprot.writeSandeshBegin('VncApiError') < 0: return -1
- if self.file is not None:
- annotations = {}
- if oprot.writeFieldBegin('file', TType.STRING, -32768, annotations) < 0: return -1
- if oprot.writeString(self.file) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.line is not None:
- annotations = {}
- if oprot.writeFieldBegin('line', TType.I32, -32767, annotations) < 0: return -1
- if oprot.writeI32(self.line) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if self.api_error_msg is not None:
- annotations = {}
- if oprot.writeFieldBegin('api_error_msg', TType.STRING, 1, annotations) < 0: return -1
- if oprot.writeString(self.api_error_msg) < 0: return -1
- if oprot.writeFieldEnd() < 0: return -1
- if oprot.writeFieldStop() < 0: return -1
- if oprot.writeSandeshEnd() < 0: return -1
- return 0
-
- def validate(self):
- return
-
-
- def compare(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.file != other.file:
- return False
- if self.line != other.line:
- return False
- if self.api_error_msg != other.api_error_msg:
- return False
- return True
-
- def __repr__(self):
- L = ['%s=%r' % (key, value)
- for key, value in self.__dict__.iteritems()]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not (self == other)
-
-
-_SANDESH_REQUEST_LIST = [
-]
-
-
-_SANDESH_UVE_LIST = [
-]
-
-
-_SANDESH_UVE_DATA_LIST = [
-]
-
-
-_SANDESH_ALARM_LIST = [
-]
-
-
-_SANDESH_ALARM_DATA_LIST = [
-]
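
The generated Sandesh classes removed above all share the same boilerplate: a log() method that emits only the fields that are set, plus __repr__/__eq__/__ne__ derived from __dict__. A minimal standalone sketch of that pattern (LogRecord and its field names are illustrative, not part of the generated API):

# Sketch of the shared pattern in the generated classes above: emit only the
# fields that are set, and derive repr/equality from __dict__.
# LogRecord and its field names are illustrative, not the generated API.
class LogRecord(object):
    def __init__(self, operation=None, useragent=None, error=None):
        self.operation = operation
        self.useragent = useragent
        self.error = error

    def log(self):
        parts = []
        for name in ('operation', 'useragent', 'error'):
            value = getattr(self, name)
            if value is not None:
                parts.append('%s = %s' % (name, value))
        return ' '.join(parts)

    def __repr__(self):
        fields = ['%s=%r' % (k, v) for k, v in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(fields))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)

print(LogRecord(operation='GET').log())   # "operation = GET"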
diff --git a/Testcases/cfgm_common/uve/vnc_api/ttypes.pyc b/Testcases/cfgm_common/uve/vnc_api/ttypes.pyc
deleted file mode 100644
index 1d0400a..0000000
--- a/Testcases/cfgm_common/uve/vnc_api/ttypes.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/uve/vnc_api/vnc_api.html b/Testcases/cfgm_common/uve/vnc_api/vnc_api.html
deleted file mode 100644
index b467b7c..0000000
--- a/Testcases/cfgm_common/uve/vnc_api/vnc_api.html
+++ /dev/null
@@ -1,14 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml">
-<head>
-<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
-<link href="style.css" rel="stylesheet" type="text/css"/>
-<title>Module: vnc_api</title></head><body>
-<h1>Module: vnc_api</h1>
-<table><tr><th>Module</th><th>Sandeshs</th></tr>
-<tr>
-<td>vnc_api</td><td></td></tr>
-</table>
-<hr/><h2 id="Sandeshs"></h2>
-</body></html>
diff --git a/Testcases/cfgm_common/uve/vnc_api/vnc_api.xml b/Testcases/cfgm_common/uve/vnc_api/vnc_api.xml
deleted file mode 100644
index 4cf212d..0000000
--- a/Testcases/cfgm_common/uve/vnc_api/vnc_api.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<?xml-stylesheet type="text/xsl" href="/universal_parse.xsl"?>
-<vnc_api type="rlist">
-</vnc_api>
diff --git a/Testcases/cfgm_common/vnc_cassandra.py b/Testcases/cfgm_common/vnc_cassandra.py
deleted file mode 100644
index 1bbb109..0000000
--- a/Testcases/cfgm_common/vnc_cassandra.py
+++ /dev/null
@@ -1,317 +0,0 @@
-#
-# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
-#
-
-import pycassa
-from pycassa import ColumnFamily
-from pycassa.batch import Mutator
-from pycassa.system_manager import SystemManager, SIMPLE_STRATEGY
-from pycassa.pool import AllServersUnavailable
-
-from vnc_api.gen.vnc_cassandra_client_gen import VncCassandraClientGen
-from exceptions import NoIdError
-from pysandesh.connection_info import ConnectionState
-from pysandesh.gen_py.process_info.ttypes import ConnectionStatus, \
- ConnectionType
-from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
-import time
-import json
-import utils
-
-class VncCassandraClient(VncCassandraClientGen):
- # Name to ID mapping keyspace + tables
- _UUID_KEYSPACE_NAME = 'config_db_uuid'
-
- # TODO describe layout
- _OBJ_UUID_CF_NAME = 'obj_uuid_table'
-
- # TODO describe layout
- _OBJ_FQ_NAME_CF_NAME = 'obj_fq_name_table'
-
- @classmethod
- def get_db_info(cls):
- db_info = [(cls._UUID_KEYSPACE_NAME, [cls._OBJ_UUID_CF_NAME,
- cls._OBJ_FQ_NAME_CF_NAME])]
- return db_info
- # end get_db_info
-
- def __init__(self, server_list, reset_config, db_prefix, keyspaces, logger,
- generate_url=None):
- super(VncCassandraClient, self).__init__()
- self._reset_config = reset_config
- self._cache_uuid_to_fq_name = {}
- if db_prefix:
- self._db_prefix = '%s_' %(db_prefix)
- else:
- self._db_prefix = ''
- self._server_list = server_list
- self._conn_state = ConnectionStatus.INIT
- self._logger = logger
-
- # if no generate_url is specified, use a dummy function that always
- # returns an empty string
- self._generate_url = generate_url or (lambda x,y: '')
- self._cf_dict = {}
- self._keyspaces = {
- self._UUID_KEYSPACE_NAME: [(self._OBJ_UUID_CF_NAME, None),
- (self._OBJ_FQ_NAME_CF_NAME, None)]}
-
- if keyspaces:
- self._keyspaces.update(keyspaces)
- self._cassandra_init(server_list)
- self._cache_uuid_to_fq_name = {}
- self._obj_uuid_cf = self._cf_dict[self._OBJ_UUID_CF_NAME]
- self._obj_fq_name_cf = self._cf_dict[self._OBJ_FQ_NAME_CF_NAME]
- # end __init__
-
- def _update_sandesh_status(self, status, msg=''):
- ConnectionState.update(conn_type=ConnectionType.DATABASE,
- name='Cassandra', status=status, message=msg,
- server_addrs=self._server_list)
-
- def _handle_exceptions(self, func):
- def wrapper(*args, **kwargs):
- try:
- if self._conn_state != ConnectionStatus.UP:
- # will set conn_state to UP if successful
- self._cassandra_init_conn_pools()
-
- return func(*args, **kwargs)
- except AllServersUnavailable:
- if self._conn_state != ConnectionStatus.DOWN:
- self._update_sandesh_status(ConnectionStatus.DOWN)
- msg = 'Cassandra connection down. Exception in %s' \
- %(str(func))
- self._logger(msg, level=SandeshLevel.SYS_ERR)
-
- self._conn_state = ConnectionStatus.DOWN
- raise
-
- return wrapper
- # end _handle_exceptions
-
- # Helper routines for cassandra
- def _cassandra_init(self, server_list):
- # 1. Ensure keyspace and schema/CFs exist
- # 2. Read in persisted data and publish to ifmap server
-
- self._update_sandesh_status(ConnectionStatus.INIT)
-
- ColumnFamily.get = self._handle_exceptions(ColumnFamily.get)
- ColumnFamily.multiget = self._handle_exceptions(ColumnFamily.multiget)
- ColumnFamily.xget = self._handle_exceptions(ColumnFamily.xget)
- ColumnFamily.get_range = self._handle_exceptions(ColumnFamily.get_range)
- ColumnFamily.insert = self._handle_exceptions(ColumnFamily.insert)
- ColumnFamily.remove = self._handle_exceptions(ColumnFamily.remove)
- Mutator.send = self._handle_exceptions(Mutator.send)
-
- for ks,cf_list in self._keyspaces.items():
- keyspace = '%s%s' %(self._db_prefix, ks)
- self._cassandra_ensure_keyspace(server_list, keyspace, cf_list)
-
- self._cassandra_init_conn_pools()
- # end _cassandra_init
-
- def _cassandra_ensure_keyspace(self, server_list,
- keyspace_name, cf_info_list):
- # Retry till cassandra is up
- server_idx = 0
- num_dbnodes = len(self._server_list)
- connected = False
- while not connected:
- try:
- cass_server = self._server_list[server_idx]
- sys_mgr = SystemManager(cass_server)
- connected = True
- except Exception as e:
- # TODO do only for
- # thrift.transport.TTransport.TTransportException
- server_idx = (server_idx + 1) % num_dbnodes
- time.sleep(3)
-
- if self._reset_config:
- try:
- sys_mgr.drop_keyspace(keyspace_name)
- except pycassa.cassandra.ttypes.InvalidRequestException as e:
- # TODO verify only EEXISTS
- self._logger("Warning! " + str(e), level=SandeshLevel.SYS_WARN)
-
- try:
- sys_mgr.create_keyspace(keyspace_name, SIMPLE_STRATEGY,
- {'replication_factor': str(num_dbnodes)})
- except pycassa.cassandra.ttypes.InvalidRequestException as e:
- # TODO verify only EEXISTS
- self._logger("Warning! " + str(e), level=SandeshLevel.SYS_WARN)
-
- gc_grace_sec = 0
- if num_dbnodes > 1:
- gc_grace_sec = 60
-
- for cf_info in cf_info_list:
- try:
- (cf_name, comparator_type) = cf_info
- if comparator_type:
- sys_mgr.create_column_family(
- keyspace_name, cf_name,
- comparator_type=comparator_type,
- gc_grace_seconds=gc_grace_sec,
- default_validation_class='UTF8Type')
- else:
- sys_mgr.create_column_family(keyspace_name, cf_name,
- gc_grace_seconds=gc_grace_sec,
- default_validation_class='UTF8Type')
- except pycassa.cassandra.ttypes.InvalidRequestException as e:
- # TODO verify only EEXISTS
- self._logger("Warning! " + str(e), level=SandeshLevel.SYS_WARN)
- sys_mgr.alter_column_family(keyspace_name, cf_name,
- gc_grace_seconds=gc_grace_sec,
- default_validation_class='UTF8Type')
- # end _cassandra_ensure_keyspace
-
- def _cassandra_init_conn_pools(self):
- for ks,cf_list in self._keyspaces.items():
- pool = pycassa.ConnectionPool(
- ks, self._server_list, max_overflow=-1, use_threadlocal=True,
- prefill=True, pool_size=20, pool_timeout=120,
- max_retries=-1, timeout=5)
-
- rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
- wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
-
- for (cf, _) in cf_list:
- self._cf_dict[cf] = ColumnFamily(
- pool, cf, read_consistency_level = rd_consistency,
- write_consistency_level = wr_consistency)
-
- ConnectionState.update(conn_type = ConnectionType.DATABASE,
- name = 'Cassandra', status = ConnectionStatus.UP, message = '',
- server_addrs = self._server_list)
- self._conn_state = ConnectionStatus.UP
- msg = 'Cassandra connection ESTABLISHED'
- self._logger(msg, level=SandeshLevel.SYS_NOTICE)
- # end _cassandra_init_conn_pools
-
- def cache_uuid_to_fq_name_add(self, id, fq_name, obj_type):
- self._cache_uuid_to_fq_name[id] = (fq_name, obj_type)
- # end cache_uuid_to_fq_name_add
-
- def cache_uuid_to_fq_name_del(self, id):
- try:
- del self._cache_uuid_to_fq_name[id]
- except KeyError:
- pass
- # end cache_uuid_to_fq_name_del
-
- def uuid_to_fq_name(self, id):
- try:
- return self._cache_uuid_to_fq_name[id][0]
- except KeyError:
- try:
- obj = self._obj_uuid_cf.get(id, columns=['fq_name', 'type'])
- except pycassa.NotFoundException:
- raise NoIdError(id)
-
- fq_name = json.loads(obj['fq_name'])
- obj_type = json.loads(obj['type'])
- self.cache_uuid_to_fq_name_add(id, fq_name, obj_type)
- return fq_name
- # end uuid_to_fq_name
-
- def uuid_to_obj_type(self, id):
- try:
- return self._cache_uuid_to_fq_name[id][1]
- except KeyError:
- try:
- obj = self._obj_uuid_cf.get(id, columns=['fq_name', 'type'])
- except pycassa.NotFoundException:
- raise NoIdError(id)
-
- fq_name = json.loads(obj['fq_name'])
- obj_type = json.loads(obj['type'])
- self.cache_uuid_to_fq_name_add(id, fq_name, obj_type)
- return obj_type
- # end uuid_to_obj_type
-
-
- def fq_name_to_uuid(self, obj_type, fq_name):
- method_name = obj_type.replace('-', '_')
- fq_name_str = ':'.join(fq_name)
- col_start = '%s:' % (utils.encode_string(fq_name_str))
- col_fin = '%s;' % (utils.encode_string(fq_name_str))
- try:
- col_info_iter = self._obj_fq_name_cf.xget(
- method_name, column_start=col_start, column_finish=col_fin)
- except pycassa.NotFoundException:
- raise NoIdError('%s %s' % (obj_type, fq_name))
-
- col_infos = list(col_info_iter)
-
- if len(col_infos) == 0:
- raise NoIdError('%s %s' % (obj_type, fq_name))
-
- for (col_name, col_val) in col_infos:
- obj_uuid = col_name.split(':')[-1]
-
- return obj_uuid
- # end fq_name_to_uuid
-
- def _read_child(self, result, obj_uuid, child_type,
- child_uuid, child_tstamp):
- if '%ss' % (child_type) not in result:
- result['%ss' % (child_type)] = []
-
- child_info = {}
- child_info['to'] = self.uuid_to_fq_name(child_uuid)
- child_info['href'] = self._generate_url(child_type, child_uuid)
- child_info['uuid'] = child_uuid
- child_info['tstamp'] = child_tstamp
-
- result['%ss' % (child_type)].append(child_info)
- # end _read_child
-
- def _read_ref(self, result, obj_uuid, ref_type, ref_uuid, ref_data_json):
- if '%s_refs' % (ref_type) not in result:
- result['%s_refs' % (ref_type)] = []
-
- ref_data = json.loads(ref_data_json)
- ref_info = {}
- try:
- ref_info['to'] = self.uuid_to_fq_name(ref_uuid)
- except NoIdError as e:
- ref_info['to'] = ['ERROR']
-
- if ref_data:
- try:
- ref_info['attr'] = ref_data['attr']
- except KeyError:
- # TODO remove backward compat old format had attr directly
- ref_info['attr'] = ref_data
-
- ref_info['href'] = self._generate_url(ref_type, ref_uuid)
- ref_info['uuid'] = ref_uuid
-
- result['%s_refs' % (ref_type)].append(ref_info)
- # end _read_ref
-
- def _read_back_ref(self, result, obj_uuid, back_ref_type,
- back_ref_uuid, back_ref_data_json):
- if '%s_back_refs' % (back_ref_type) not in result:
- result['%s_back_refs' % (back_ref_type)] = []
-
- back_ref_info = {}
- back_ref_info['to'] = self.uuid_to_fq_name(back_ref_uuid)
- back_ref_data = json.loads(back_ref_data_json)
- if back_ref_data:
- try:
- back_ref_info['attr'] = back_ref_data['attr']
- except KeyError:
- # TODO remove backward compat old format had attr directly
- back_ref_info['attr'] = back_ref_data
-
- back_ref_info['href'] = self._generate_url(back_ref_type, back_ref_uuid)
- back_ref_info['uuid'] = back_ref_uuid
-
- result['%s_back_refs' % (back_ref_type)].append(back_ref_info)
- # end _read_back_ref
-
-
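
vnc_cassandra.py above routes every pycassa ColumnFamily call through _handle_exceptions, which re-establishes the connection pools before a call when the connection is marked down and flags it down again when all servers become unavailable. A standalone sketch of that wrapper pattern; ConnectionDown and the reconnect/is_up/mark_down callables are placeholders for the pycassa exception and the client's own state handling, not pycassa APIs:

# Standalone sketch of the _handle_exceptions wrapper pattern.
import functools

class ConnectionDown(Exception):
    pass

def guarded(func, reconnect, is_up, mark_down):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if not is_up():
            reconnect()            # re-establish pools before retrying the call
        try:
            return func(*args, **kwargs)
        except ConnectionDown:
            mark_down()            # record the outage, then let the caller see it
            raise
    return wrapper

# usage sketch (all names are placeholders):
# cf_get = guarded(column_family.get, client.reconnect, client.is_up, client.mark_down)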
diff --git a/Testcases/cfgm_common/vnc_cassandra.pyc b/Testcases/cfgm_common/vnc_cassandra.pyc
deleted file mode 100644
index 4c8cac5..0000000
--- a/Testcases/cfgm_common/vnc_cassandra.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/vnc_cpu_info.py b/Testcases/cfgm_common/vnc_cpu_info.py
deleted file mode 100644
index d929534..0000000
--- a/Testcases/cfgm_common/vnc_cpu_info.py
+++ /dev/null
@@ -1,196 +0,0 @@
-#
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-"""
-.. attention:: Fix the license string
-"""
-import os
-import socket
-import psutil
-import gevent
-from uve.cfgm_cpuinfo.ttypes import *
-from uve.cfgm_cpuinfo.cpuinfo.ttypes import *
-from buildinfo import build_info
-from sandesh_common.vns.ttypes import Module
-from sandesh_common.vns.constants import ModuleNames
-
-# CpuInfo object for config-node
-
-
-class CpuInfo(object):
-
- def __init__(self, module_id, instance_id, sysinfo_req, sandesh,
- time_interval, server_ip=None):
- # store cpuinfo at init
- self._module_id = module_id
- self._instance_id = instance_id
- self._sysinfo = sysinfo_req
- self._sandesh = sandesh
- self._time_interval = time_interval
- self._rss = 0
- self._vms = 0
- self._pvms = 0
- self._load_avg = (0, 0, 0)
- self._phymem_usage = (0, 0, 0, 0)
- self._phymem_buffers = 0
- self._num_cpus = 0
- self._cpu_share = 0
- self._curr_build_info = None
- self._new_build_info = None
- self._curr_ip = server_ip
- self._new_ip = None
-
- # spawn a Greenlet object to do periodic collect and send.
- gevent.spawn(self.cpu_stats)
- # end __init__
-
- def get_config_node_ip(self):
- return self._curr_ip
-    # end get_config_node_ip

-
- def set_config_node_ip(self, server_ip):
- self._curr_ip = server_ip
-    # end set_config_node_ip
-
- def cpu_stats(self):
- cfg_process = psutil.Process(os.getpid())
- while True:
- # collect Vmsizes
- self._ip_change = 0
- self._build_change = 0
- rss = cfg_process.get_memory_info().rss
- if (self._rss != rss):
- self._rss = rss
-
- vms = cfg_process.get_memory_info().vms
- if (self._vms != vms):
- self._vms = vms
-
- pvms = vms
- if (pvms > self._pvms):
- self._pvms = pvms
-
- if self._sysinfo:
- # collect CPU Load avg
- load_avg = os.getloadavg()
- if (load_avg != self._load_avg):
- self._load_avg = load_avg
-
-                # collect system memory info
- phymem_usage = psutil.phymem_usage()
- if (phymem_usage != self._phymem_usage):
- self._phymem_usage = phymem_usage
-
- phymem_buffers = psutil.phymem_buffers()
- if (phymem_buffers != self._phymem_buffers):
- self._phymem_buffers = phymem_buffers
-
- if (self._new_ip != self._curr_ip):
- self._new_ip = self.get_config_node_ip()
- self._ip_change = 1
-
- # Retrieve build_info from package/rpm and cache it
- if self._curr_build_info is None:
- command = "contrail-version contrail-config | grep 'contrail-config'"
- version = os.popen(command).read()
- _, rpm_version, build_num = version.split()
- self._new_build_info = build_info + '"build-id" : "' + \
- rpm_version + '", "build-number" : "' + \
- build_num + '"}]}'
- if (self._new_build_info != self._curr_build_info):
- self._curr_build_info = self._new_build_info
- self._build_change = 1
-
- num_cpus = psutil.NUM_CPUS
- if (num_cpus != self._num_cpus):
- self._num_cpus = num_cpus
-
- cpu_percent = cfg_process.get_cpu_percent(interval=0.1)
- cpu_share = cpu_percent / num_cpus
- self._cpu_share = cpu_share
-
- self._send_cpustats()
-
- gevent.sleep(self._time_interval)
- # end cpu_stats
-
- # Send Uve Object
- def _send_cpustats(self):
- mod_cpu = ModuleCpuInfo()
- mod_cpu.module_id = self._module_id
- mod_cpu.instance_id = self._instance_id
-
- mod_cpu.cpu_info = CpuLoadInfo()
-
- # populate number of available CPU
- mod_cpu.cpu_info.num_cpu = self._num_cpus
-
- if self._sysinfo:
- # populate system memory details
- mod_cpu.cpu_info.sys_mem_info = SysMemInfo()
- mod_cpu.cpu_info.sys_mem_info.total = self._phymem_usage[0] / 1024
- mod_cpu.cpu_info.sys_mem_info.used = self._phymem_usage[1] / 1024
- mod_cpu.cpu_info.sys_mem_info.free = self._phymem_usage[2] / 1024
- mod_cpu.cpu_info.sys_mem_info.buffers = self._phymem_buffers / 1024
-
- # populate CPU Load avg
- mod_cpu.cpu_info.cpuload = CpuLoadAvg()
- mod_cpu.cpu_info.cpuload.one_min_avg = self._load_avg[0]
- mod_cpu.cpu_info.cpuload.five_min_avg = self._load_avg[1]
- mod_cpu.cpu_info.cpuload.fifteen_min_avg = self._load_avg[2]
-
- # populate Virtual Memory details
- mod_cpu.cpu_info.meminfo = MemInfo()
- mod_cpu.cpu_info.meminfo.virt = self._vms / 1024
- mod_cpu.cpu_info.meminfo.peakvirt = self._pvms / 1024
- mod_cpu.cpu_info.meminfo.res = self._rss / 1024
-
- # populate cpu_share, which is calibrated with num_cpu
- mod_cpu.cpu_info.cpu_share = self._cpu_share
-
- cpu_load_info_list = [mod_cpu]
-
- cfgm_cpu_uve = ModuleCpuState(module_cpu_info=cpu_load_info_list)
- cfgm_cpu_uve.name = socket.gethostname()
- if self._sysinfo:
- if self._ip_change:
- cfgm_cpu_uve.config_node_ip = self._new_ip
- if self._build_change:
- cfgm_cpu_uve.build_info = self._curr_build_info
-
- if (self._module_id == ModuleNames[Module.API_SERVER]):
- cfgm_cpu_uve.api_server_mem_virt = mod_cpu.cpu_info.meminfo.virt
- cfgm_cpu_uve.api_server_cpu_share = self._cpu_share
-
- if (self._module_id == ModuleNames[Module.SCHEMA_TRANSFORMER]):
- cfgm_cpu_uve.schema_xmer_mem_virt = mod_cpu.cpu_info.meminfo.virt
- cfgm_cpu_uve.schema_xmer_cpu_share = self._cpu_share
-
- if (self._module_id == ModuleNames[Module.SVC_MONITOR]):
- cfgm_cpu_uve.service_monitor_mem_virt =\
- mod_cpu.cpu_info.meminfo.virt
- cfgm_cpu_uve.service_monitor_cpu_share = self._cpu_share
-
- cpu_info_trace = ModuleCpuStateTrace(
- data=cfgm_cpu_uve, sandesh=self._sandesh)
- cpu_info_trace.send(sandesh=self._sandesh)
-
- cnf_cpu_state = ConfigCpuState()
- cnf_cpu_state.name = socket.gethostname()
-
- cnf_cpu_info = ProcessCpuInfo()
- cnf_cpu_info.module_id = self._module_id
- cnf_cpu_info.inst_id = self._instance_id
- cnf_cpu_info.cpu_share = self._cpu_share
- cnf_cpu_info.mem_virt = mod_cpu.cpu_info.meminfo.virt
- cnf_cpu_info.mem_res = mod_cpu.cpu_info.meminfo.res
- cnf_cpu_state.cpu_info = [cnf_cpu_info]
-
- cnf_cpu_state_trace = ConfigCpuStateTrace(
- sandesh=self._sandesh, data=cnf_cpu_state)
- cnf_cpu_state_trace.send(sandesh=self._sandesh)
-
- # end _send_cpustats
-
-# end class CpuInfo
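
vnc_cpu_info.py samples process and host statistics on a fixed interval from a gevent greenlet and pushes them out as UVEs. A minimal sketch of the same sampling loop using current psutil calls; send_stats is a placeholder for the UVE send performed by _send_cpustats:

# Minimal sketch of the periodic sampling loop in CpuInfo.cpu_stats.
import os
import gevent
import psutil

def start_sampler(send_stats, interval=60):
    proc = psutil.Process(os.getpid())

    def sample_loop():
        peak_virt = 0
        while True:
            mem = proc.memory_info()                  # rss / vms in bytes
            peak_virt = max(peak_virt, mem.vms)
            send_stats({
                'res_kb': mem.rss // 1024,
                'virt_kb': mem.vms // 1024,
                'peak_virt_kb': peak_virt // 1024,
                'one_min_load': os.getloadavg()[0],
                'cpu_share': proc.cpu_percent(interval=0.1) / psutil.cpu_count(),
            })
            gevent.sleep(interval)

    return gevent.spawn(sample_loop)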
diff --git a/Testcases/cfgm_common/vnc_cpu_info.pyc b/Testcases/cfgm_common/vnc_cpu_info.pyc
deleted file mode 100644
index 8a122c9..0000000
--- a/Testcases/cfgm_common/vnc_cpu_info.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/vnc_db.py b/Testcases/cfgm_common/vnc_db.py
deleted file mode 100644
index fae990d..0000000
--- a/Testcases/cfgm_common/vnc_db.py
+++ /dev/null
@@ -1,205 +0,0 @@
-#
-# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
-#
-
-"""
-This file contains implementation of database model for contrail config daemons
-"""
-from vnc_api.common.exceptions import NoIdError
-
-class DBBase(object):
- # This is the base class for all DB objects. All derived objects must
- # have a class member called _dict of dictionary type.
-    # The init method of this class must be called before using any functions
-
- _logger = None
- _cassandra = None
- _manager = None
-
- @classmethod
- def init(cls, manager, logger, cassandra):
- cls._logger = logger
- cls._cassandra = cassandra
- cls._manager = manager
- # end init
-
- class __metaclass__(type):
-
- def __iter__(cls):
- for i in cls._dict:
- yield i
- # end __iter__
-
- def values(cls):
- for i in cls._dict.values():
- yield i
- # end values
-
- def items(cls):
- for i in cls._dict.items():
- yield i
- # end items
- # end __metaclass__
-
- @classmethod
- def get(cls, key):
- if key in cls._dict:
- return cls._dict[key]
- return None
- # end get
-
- @classmethod
- def locate(cls, key, *args):
- if key not in cls._dict:
- try:
- cls._dict[key] = cls(key, *args)
- except NoIdError as e:
- cls._logger.debug(
- "Exception %s while creating %s for %s",
- e, cls.__name__, key)
- return None
- return cls._dict[key]
- # end locate
-
- @classmethod
- def delete(cls, key):
- if key in cls._dict:
- del cls._dict[key]
- # end delete
-
- def get_ref_uuid_from_dict(self, obj_dict, ref_name):
- if ref_name in obj_dict:
- return obj_dict[ref_name][0]['uuid']
- else:
- return None
-
- def add_ref(self, ref_type, ref):
- if hasattr(self, ref_type):
- setattr(self, ref_type, ref)
- elif hasattr(self, ref_type+'s'):
- ref_set = getattr(self, ref_type+'s')
- ref_set.add(ref)
- # end add_ref
-
- def delete_ref(self, ref_type, ref):
- if hasattr(self, ref_type) and getattr(self, ref_type) == ref:
- setattr(self, ref_type, None)
- elif hasattr(self, ref_type+'s'):
- ref_set = getattr(self, ref_type+'s')
- ref_set.discard(ref)
- # end delete_ref
-
- def add_to_parent(self, obj_dict):
- self.parent_type = obj_dict.get('parent_type')
- self.parent_id = obj_dict.get('parent_uuid')
- if not self.parent_type or not self.parent_id:
- return
- p_obj = self._OBJ_TYPE_MAP[self.parent_type].get(self.parent_id)
- if p_obj is not None:
- p_obj.add_ref(self.obj_type, self.uuid)
- # end
-
- def remove_from_parent(self):
- if not self.parent_type or not self.parent_id:
- return
- p_obj = self._OBJ_TYPE_MAP[self.parent_type].get(self.parent_id)
- if p_obj is not None:
- p_obj.delete_ref(self.obj_type, self.uuid)
-
- def update_single_ref(self, ref_type, obj):
- refs = obj.get(ref_type+'_refs') or obj.get(ref_type+'_back_refs')
- if refs:
- try:
- new_id = refs[0]['uuid']
- except KeyError:
- fq_name = refs[0]['to']
- new_id = self._cassandra.fq_name_to_uuid(ref_type, fq_name)
- else:
- new_id = None
- old_id = getattr(self, ref_type, None)
- if old_id == new_id:
- return
- ref_obj = self._OBJ_TYPE_MAP[ref_type].get(old_id)
- if ref_obj is not None:
- ref_obj.delete_ref(self.obj_type, self.uuid)
- ref_obj = self._OBJ_TYPE_MAP[ref_type].get(new_id)
- if ref_obj is not None:
- ref_obj.add_ref(self.obj_type, self.uuid)
- setattr(self, ref_type, new_id)
- # end update_single_ref
-
- def set_children(self, ref_type, obj):
- refs = obj.get(ref_type+'s')
- new_refs = set()
- for ref in refs or []:
- try:
- new_id = ref['uuid']
- except KeyError:
- fq_name = ref['to']
- new_id = self._cassandra.fq_name_to_uuid(ref_type, fq_name)
- new_refs.add(new_id)
- setattr(self, ref_type+'s', new_refs)
- # end
-
- def update_multiple_refs(self, ref_type, obj):
- refs = obj.get(ref_type+'_refs') or obj.get(ref_type+'_back_refs')
- new_refs = set()
- for ref in refs or []:
- try:
- new_id = ref['uuid']
- except KeyError:
- fq_name = ref['to']
- new_id = self._cassandra.fq_name_to_uuid(ref_type, fq_name)
- new_refs.add(new_id)
- old_refs = getattr(self, ref_type+'s')
- for ref_id in old_refs - new_refs:
- ref_obj = self._OBJ_TYPE_MAP[ref_type].get(ref_id)
- if ref_obj is not None:
- ref_obj.delete_ref(self.obj_type, self.uuid)
- for ref_id in new_refs - old_refs:
- ref_obj = self._OBJ_TYPE_MAP[ref_type].get(ref_id)
- if ref_obj is not None:
- ref_obj.add_ref(self.obj_type, self.uuid)
- setattr(self, ref_type+'s', new_refs)
- # end update_multiple_refs
-
- def read_obj(self, uuid, obj_type=None):
- method_name = "_cassandra_%s_read" % (obj_type or self.obj_type)
- method = getattr(self._cassandra, method_name)
- ok, objs = method([uuid])
- if not ok:
- self._logger.error(
- 'Cannot read %s %s, error %s' % (obj_type, uuid, objs))
- raise NoIdError('')
- return objs[0]
- # end read_obj
-
- def get_parent_uuid(self, obj):
- if 'parent_uuid' in obj:
- return obj['parent_uuid']
- else:
- parent_type = obj['parent_type'].replace('-', '_')
- parent_fq_name = obj['fq_name'][:-1]
- return self._cassandra.fq_name_to_uuid(parent_type, parent_fq_name)
- # end get_parent_uuid
-
- @classmethod
- def find_by_name_or_uuid(cls, name_or_uuid):
- obj = cls.get(name_or_uuid)
- if obj:
- return obj
-
- for obj in cls.values():
- if obj.name == name_or_uuid:
- return obj
- return None
- # end find_by_name_or_uuid
-
- @classmethod
- def reset(cls):
- cls._dict = {}
-# end class DBBase
-
-DBBase._OBJ_TYPE_MAP = {
-}
-
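
vnc_db.py keeps one class-level dictionary per DBBase subclass and resolves objects through get/locate/delete, creating an entry on first use. A small sketch of that create-on-miss registry; VirtualNetworkState is a hypothetical subclass used only for illustration:

# Sketch of the per-subclass registry behind DBBase.get/locate/delete.
class CachedObject(object):
    _dict = {}                      # each subclass declares its own map

    def __init__(self, uuid):
        self.uuid = uuid

    @classmethod
    def get(cls, key):
        return cls._dict.get(key)

    @classmethod
    def locate(cls, key, *args):
        # create-on-miss: build and cache the object the first time it is seen
        if key not in cls._dict:
            cls._dict[key] = cls(key, *args)
        return cls._dict[key]

    @classmethod
    def delete(cls, key):
        cls._dict.pop(key, None)

class VirtualNetworkState(CachedObject):
    _dict = {}

vn = VirtualNetworkState.locate('vn-uuid-1')   # created and cached
assert VirtualNetworkState.get('vn-uuid-1') is vn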
diff --git a/Testcases/cfgm_common/vnc_db.pyc b/Testcases/cfgm_common/vnc_db.pyc
deleted file mode 100644
index af6d0d6..0000000
--- a/Testcases/cfgm_common/vnc_db.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/vnc_extensions.py b/Testcases/cfgm_common/vnc_extensions.py
deleted file mode 100644
index cebbe4d..0000000
--- a/Testcases/cfgm_common/vnc_extensions.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-import functools
-
-import stevedore
-
-
-class ApiHookManager(stevedore.hook.HookManager):
- def __init__(self, namespace, hook_name):
- super(ApiHookManager, self).__init__(namespace, hook_name,
- invoke_on_load=True)
- #end __init__
-
- def run_pre(self, hook_name, args, kwargs):
- for e in self.extensions:
- obj = e.obj
- pre = getattr(obj, 'pre', None)
- if pre:
- pre(*args, **kwargs)
- #end run_pre
-
- def run_post(self, hook_name, rv, args, kwargs):
- for e in reversed(self.extensions):
- obj = e.obj
- post = getattr(obj, 'post', None)
- if post:
- post(rv, *args, **kwargs)
- #end run_post
-#end class ApiHookManager
-
-
-def add_api_hook(hook_manager, hook_name):
- def outer(f):
- @functools.wraps(f)
- def inner(*args, **kwargs):
- hook_manager.run_pre(hook_name, args, kwargs)
- rv = f(*args, **kwargs)
- hook_manager.run_post(hook_name, rv, args, kwargs)
-
- return rv
-
- return inner
- #end inner
- #end outer
-
- return outer
-#end add_api_hook
-
-
-class ExtensionManager(stevedore.extension.ExtensionManager):
- def __init__(self, namespace, api_server_ip,
- api_server_port, conf_sections, sandesh,
- propagate_map_exceptions=False):
- super(ExtensionManager, self).__init__(
- namespace, invoke_on_load=True,
- invoke_kwds={'api_server_ip': api_server_ip,
- 'api_server_port': api_server_port,
- 'conf_sections': conf_sections,
- 'sandesh': sandesh },
- propagate_map_exceptions=propagate_map_exceptions)
- #end __init__
-
-#end class ExtensionManager
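
vnc_extensions.py wraps API handlers so that every loaded stevedore extension gets a pre() call before the handler and a post() call, in reverse order, after it. A plain-Python sketch of that pre/post wrapping, with simple hook lists standing in for the stevedore extension objects:

# Plain-Python sketch of the pre/post wrapping done by add_api_hook.
import functools

def add_hooks(pre_hooks, post_hooks):
    def outer(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            for pre in pre_hooks:
                pre(*args, **kwargs)
            rv = f(*args, **kwargs)
            for post in reversed(post_hooks):   # post hooks run in reverse order
                post(rv, *args, **kwargs)
            return rv
        return inner
    return outer

@add_hooks(pre_hooks=[lambda name: None],
           post_hooks=[lambda rv, name: None])
def create_object(name):
    return {'name': name}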
diff --git a/Testcases/cfgm_common/vnc_extensions.pyc b/Testcases/cfgm_common/vnc_extensions.pyc
deleted file mode 100644
index 68318a9..0000000
--- a/Testcases/cfgm_common/vnc_extensions.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/vnc_kombu.py b/Testcases/cfgm_common/vnc_kombu.py
deleted file mode 100644
index 0f00865..0000000
--- a/Testcases/cfgm_common/vnc_kombu.py
+++ /dev/null
@@ -1,226 +0,0 @@
-#
-# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
-#
-import re
-import amqp.exceptions
-import kombu
-import gevent
-import gevent.monkey
-gevent.monkey.patch_all()
-import time
-from gevent.queue import Queue
-try:
- from gevent.lock import Semaphore
-except ImportError:
- # older versions of gevent
- from gevent.coros import Semaphore
-
-from pysandesh.connection_info import ConnectionState
-from pysandesh.gen_py.process_info.ttypes import ConnectionStatus, \
- ConnectionType
-from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
-
-__all__ = ["VncKombuClient"]
-
-
-class VncKombuClientBase(object):
- def _update_sandesh_status(self, status, msg=''):
- ConnectionState.update(conn_type=ConnectionType.DATABASE,
- name='RabbitMQ', status=status, message=msg,
- server_addrs=["%s:%s" % (self._rabbit_ip, self._rabbit_port)])
- # end _update_sandesh_status
-
- def publish(self, message):
- self._publish_queue.put(message)
- # end publish
-
- def __init__(self, rabbit_ip, rabbit_port, rabbit_user, rabbit_password,
- rabbit_vhost, rabbit_ha_mode, q_name, subscribe_cb, logger):
- self._rabbit_ip = rabbit_ip
- self._rabbit_port = rabbit_port
- self._rabbit_user = rabbit_user
- self._rabbit_password = rabbit_password
- self._rabbit_vhost = rabbit_vhost
- self._subscribe_cb = subscribe_cb
- self._logger = logger
- self._publish_queue = Queue()
- self._conn_lock = Semaphore()
-
- self.obj_upd_exchange = kombu.Exchange('vnc_config.object-update', 'fanout',
- durable=False)
-
- def num_pending_messages(self):
- return self._publish_queue.qsize()
- # end num_pending_messages
-
- def prepare_to_consume(self):
- # override this method
- return
-
- def _reconnect(self, delete_old_q=False):
- if self._conn_lock.locked():
- # either connection-monitor or publisher should have taken
- # the lock. The one who acquired the lock would re-establish
- # the connection and releases the lock, so the other one can
- # just wait on the lock, till it gets released
- self._conn_lock.wait()
- return
-
- self._conn_lock.acquire()
-
- msg = "RabbitMQ connection down"
- self._logger(msg, level=SandeshLevel.SYS_ERR)
- self._update_sandesh_status(ConnectionStatus.DOWN)
- self._conn_state = ConnectionStatus.DOWN
-
- self._conn.close()
-
- self._conn.ensure_connection()
- self._conn.connect()
-
- self._update_sandesh_status(ConnectionStatus.UP)
- self._conn_state = ConnectionStatus.UP
- msg = 'RabbitMQ connection ESTABLISHED %s' % repr(self._conn)
- self._logger(msg, level=SandeshLevel.SYS_NOTICE)
-
- self._channel = self._conn.channel()
- if delete_old_q:
- # delete the old queue in first-connect context
- # as db-resync would have caught up with history.
- try:
- bound_q = self._update_queue_obj(self._channel)
- bound_q.delete()
- except Exception as e:
-                msg = 'Unable to delete the old amqp queue: %s' %(str(e))
- self._logger(msg, level=SandeshLevel.SYS_ERR)
-
- self._consumer = kombu.Consumer(self._channel,
- queues=self._update_queue_obj,
- callbacks=[self._subscribe])
- self._producer = kombu.Producer(self._channel, exchange=self.obj_upd_exchange)
-
- self._conn_lock.release()
- # end _reconnect
-
- def _connection_watch(self):
- self.prepare_to_consume()
- while True:
- try:
- self._consumer.consume()
- self._conn.drain_events()
- except self._conn.connection_errors + self._conn.channel_errors as e:
- self._reconnect()
- # end _connection_watch
-
- def _publisher(self):
- message = None
- while True:
- try:
- if not message:
- # earlier was sent fine, dequeue one more
- message = self._publish_queue.get()
-
- while True:
- try:
- self._producer.publish(message)
- message = None
- break
- except self._conn.connection_errors + self._conn.channel_errors as e:
- self._reconnect()
- except Exception as e:
- log_str = "Unknown exception in _publisher greenlet" + str(e)
- self._logger(log_str, level=SandeshLevel.SYS_ERR)
- # end _publisher
-
- def _subscribe(self, body, message):
- try:
- self._subscribe_cb(body)
- finally:
- message.ack()
-
-
- def _start(self):
- self._reconnect(delete_old_q=True)
-
- self._publisher_greenlet = gevent.spawn(self._publisher)
- self._connection_monitor_greenlet = gevent.spawn(self._connection_watch)
-
- def shutdown(self):
- self._publisher_greenlet.kill()
- self._connection_monitor_greenlet.kill()
- self._producer.close()
- self._consumer.close()
- self._conn.close()
-
-
-class VncKombuClientV1(VncKombuClientBase):
- def __init__(self, rabbit_ip, rabbit_port, rabbit_user, rabbit_password,
- rabbit_vhost, rabbit_ha_mode, q_name, subscribe_cb, logger):
- super(VncKombuClientV1, self).__init__(rabbit_ip, rabbit_port,
- rabbit_user, rabbit_password,
- rabbit_vhost, rabbit_ha_mode,
- q_name, subscribe_cb, logger)
-
- self._conn = kombu.Connection(hostname=self._rabbit_ip,
- port=self._rabbit_port,
- userid=self._rabbit_user,
- password=self._rabbit_password,
- virtual_host=self._rabbit_vhost)
- self._update_queue_obj = kombu.Queue(q_name, self.obj_upd_exchange, durable=False)
- self._start()
- # end __init__
-
-
-class VncKombuClientV2(VncKombuClientBase):
- def _parse_rabbit_hosts(self, rabbit_hosts):
- server_list = rabbit_hosts.split(",")
-
- default_dict = {'user': self._rabbit_user,
- 'password': self._rabbit_password,
- 'port': self._rabbit_port}
- ret = []
- for s in server_list:
- match = re.match("(?:(?P<user>.*?)(?::(?P<password>.*?))*@)*(?P<host>.*?)(?::(?P<port>\d+))*$", s)
- if match:
- mdict = match.groupdict().copy()
- for key in ['user', 'password', 'port']:
- if not mdict[key]:
- mdict[key] = default_dict[key]
-
- ret.append(mdict)
-
- return ret
-
- def __init__(self, rabbit_hosts, rabbit_port, rabbit_user, rabbit_password,
- rabbit_vhost, rabbit_ha_mode, q_name, subscribe_cb, logger):
- super(VncKombuClientV2, self).__init__(rabbit_hosts, rabbit_port,
- rabbit_user, rabbit_password,
- rabbit_vhost, rabbit_ha_mode,
- q_name, subscribe_cb, logger)
-
- _hosts = self._parse_rabbit_hosts(rabbit_hosts)
- self._urls = []
- for h in _hosts:
- h['vhost'] = "" if not rabbit_vhost else rabbit_vhost
- _url = "pyamqp://%(user)s:%(password)s@%(host)s:%(port)s/%(vhost)s/" % h
- self._urls.append(_url)
-
- msg = "Initializing RabbitMQ connection, urls %s" % self._urls
- self._logger(msg, level=SandeshLevel.SYS_NOTICE)
- self._update_sandesh_status(ConnectionStatus.INIT)
- self._conn_state = ConnectionStatus.INIT
- self._conn = kombu.Connection(self._urls)
- queue_args = {"x-ha-policy": "all"} if rabbit_ha_mode else None
- self._update_queue_obj = kombu.Queue(q_name, self.obj_upd_exchange,
- durable=False,
- queue_arguments=queue_args)
-
- self._start()
- # end __init__
-
-
-from distutils.version import LooseVersion
-if LooseVersion(kombu.__version__) >= LooseVersion("2.5.0"):
- VncKombuClient = VncKombuClientV2
-else:
- VncKombuClient = VncKombuClientV1
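
VncKombuClientV2 accepts a comma-separated host list in which each entry may carry its own user, password and port, falling back to the configured defaults otherwise. The same regex applied outside the class, as a runnable example:

# The host-list parsing in VncKombuClientV2, shown standalone.
import re

_HOST_RE = re.compile(
    r"(?:(?P<user>.*?)(?::(?P<password>.*?))*@)*(?P<host>.*?)(?::(?P<port>\d+))*$")

def parse_rabbit_hosts(rabbit_hosts, defaults):
    parsed = []
    for entry in rabbit_hosts.split(','):
        match = _HOST_RE.match(entry)
        if not match:
            continue
        info = match.groupdict()
        for key in ('user', 'password', 'port'):
            info[key] = info[key] or defaults[key]
        parsed.append(info)
    return parsed

defaults = {'user': 'guest', 'password': 'guest', 'port': '5672'}
print(parse_rabbit_hosts('guest:guest@10.0.0.1:5672,10.0.0.2', defaults))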
diff --git a/Testcases/cfgm_common/vnc_kombu.pyc b/Testcases/cfgm_common/vnc_kombu.pyc
deleted file mode 100644
index a8c19ed..0000000
--- a/Testcases/cfgm_common/vnc_kombu.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/vnc_plugin_base.py b/Testcases/cfgm_common/vnc_plugin_base.py
deleted file mode 100644
index e1d3517..0000000
--- a/Testcases/cfgm_common/vnc_plugin_base.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-import abc
-from vnc_api.gen.vnc_api_extension_gen import ResourceApiGen
-
-class Resync(object):
- @abc.abstractmethod
- def __init__(self, api_server_ip, api_server_port, conf_sections):
- pass
- #end __init__
-
- @abc.abstractmethod
- def resync_domains_projects(self):
- """
- Method that implements auditing of projects between orchestration
- system and OpenContrail VNC
- """
- pass
-    #end resync_domains_projects
-
-#end class Resync
-
-
-class ResourceApi(ResourceApiGen):
- @abc.abstractmethod
- def __init__(self):
- pass
- #end __init__
-
- @abc.abstractmethod
- def transform_request(self, request):
- pass
- # end transform_request
-
- @abc.abstractmethod
- def validate_request(self, request):
- pass
- # end validate_request
-
- @abc.abstractmethod
- def transform_response(self, request, response):
- pass
- # end transform_response
-
-
-class NeutronApi(object):
- @abc.abstractmethod
- def __init__(self):
- pass
- #end __init__
-
-
-class AuthBase(object):
- __metaclass__ = abc.ABCMeta
-
- @abc.abstractmethod
- def __init__(self, auth_method, auth_opts):
- pass
- #end __init__
-
- @abc.abstractmethod
- def get_request_auth_app(self):
- """
- Middleware to invoke for authentication on every request
- """
- pass
- #end get_request_auth_app
-
-#end class AuthBase
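
vnc_plugin_base.py declares its plugin interfaces with abc.abstractmethod; note that the decorator is only enforced when the class uses ABCMeta, which only AuthBase sets above. A short sketch of the enforced variant, written against the Python 3 abc.ABC base for brevity (the removed module itself is Python 2):

# Sketch of an enforced abstract plugin interface; AbstractAuth and NoAuth
# are illustrative names, not classes from the removed module.
import abc

class AbstractAuth(abc.ABC):
    @abc.abstractmethod
    def get_request_auth_app(self):
        """Return the middleware invoked to authenticate every request."""

class NoAuth(AbstractAuth):
    def get_request_auth_app(self):
        return None

auth = NoAuth()        # OK: the abstract method is implemented
# AbstractAuth()       # would raise TypeError: can't instantiate abstract class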
diff --git a/Testcases/cfgm_common/vnc_plugin_base.pyc b/Testcases/cfgm_common/vnc_plugin_base.pyc
deleted file mode 100644
index 598918a..0000000
--- a/Testcases/cfgm_common/vnc_plugin_base.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cfgm_common/zkclient.py b/Testcases/cfgm_common/zkclient.py
deleted file mode 100644
index 5c8d461..0000000
--- a/Testcases/cfgm_common/zkclient.py
+++ /dev/null
@@ -1,358 +0,0 @@
-#
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-import os
-import gevent
-import logging
-import kazoo.client
-import kazoo.exceptions
-import kazoo.handlers.gevent
-import kazoo.recipe.election
-from kazoo.client import KazooState
-from kazoo.retry import KazooRetry
-
-from bitarray import bitarray
-from cfgm_common.exceptions import ResourceExhaustionError, ResourceExistsError
-from gevent.coros import BoundedSemaphore
-
-import uuid
-
-LOG_DIR = '/var/log/contrail/'
-
-class IndexAllocator(object):
-
- def __init__(self, zookeeper_client, path, size=0, start_idx=0,
- reverse=False,alloc_list=None, max_alloc=0):
- if alloc_list is None:
- self._alloc_list = [{'start':start_idx, 'end':start_idx+size}]
- else:
- sorted_alloc_list = sorted(alloc_list, key=lambda k: k['start'])
- self._alloc_list = sorted_alloc_list
-
- alloc_count = len(self._alloc_list)
- total_size = 0
- start_idx = self._alloc_list[0]['start']
- size = 0
-
- #check for overlap in alloc_list --TODO
- for alloc_idx in range (0, alloc_count -1):
- idx_start_addr = self._alloc_list[alloc_idx]['start']
- idx_end_addr = self._alloc_list[alloc_idx]['end']
- next_start_addr = self._alloc_list[alloc_idx+1]['start']
- if next_start_addr <= idx_end_addr:
- raise Exception()
- size += idx_end_addr - idx_start_addr + 1
- size += self._alloc_list[alloc_count-1]['end'] - self._alloc_list[alloc_count-1]['start'] + 1
-
- self._size = size
- self._start_idx = start_idx
- if max_alloc == 0:
- self._max_alloc = self._size
- else:
- self._max_alloc = max_alloc
-
- self._zookeeper_client = zookeeper_client
- self._path = path
- self._in_use = bitarray('0')
- self._reverse = reverse
- for idx in self._zookeeper_client.get_children(path):
- idx_int = self._get_bit_from_zk_index(int(idx))
- if idx_int >= 0:
- self._set_in_use(idx_int)
- # end for idx
- # end __init__
-
- def _get_zk_index_from_bit(self, idx):
- size = idx
- if self._reverse:
- for alloc in reversed(self._alloc_list):
- size -= alloc['end'] - alloc['start'] + 1
- if size < 0:
- return alloc['start']-size - 1
- else:
- for alloc in self._alloc_list:
- size -= alloc['end'] - alloc['start'] + 1
- if size < 0:
- return alloc['end']+size + 1
-
- raise Exception()
- # end _get_zk_index
-
- def _get_bit_from_zk_index(self, idx):
- size = 0
- if self._reverse:
- for alloc in reversed(self._alloc_list):
- if alloc['start'] <= idx <= alloc['end']:
- return alloc['end'] - idx + size
- size += alloc['end'] - alloc['start'] + 1
- pass
- else:
- for alloc in self._alloc_list:
- if alloc['start'] <= idx <= alloc['end']:
- return idx - alloc['start'] + size
- size += alloc['end'] - alloc['start'] + 1
- return -1
- # end _get_bit_from_zk_index
-
- def _set_in_use(self, idx):
- # if the index is higher than _max_alloc, do not use the bitarray, in
- # order to reduce the size of the bitarray. Otherwise, set the bit
- # corresponding to idx to 1 and extend the _in_use bitarray if needed
- if idx > self._max_alloc:
- return
- if idx >= self._in_use.length():
- temp = bitarray(idx - self._in_use.length())
- temp.setall(0)
- temp.append('1')
- self._in_use.extend(temp)
- else:
- self._in_use[idx] = 1
- # end _set_in_use
-
- def alloc(self, value=None):
- if self._in_use.all():
- idx = self._in_use.length()
- if idx > self._max_alloc:
- raise ResourceExhaustionError()
- self._in_use.append(1)
- else:
- idx = self._in_use.index(0)
- self._in_use[idx] = 1
-
- idx = self._get_zk_index_from_bit(idx)
- try:
- # Create a node at path and return its integer value
- id_str = "%(#)010d" % {'#': idx}
- self._zookeeper_client.create_node(self._path + id_str, value)
- return idx
- except ResourceExistsError:
- return self.alloc(value)
- # end alloc
-
- def reserve(self, idx, value=None):
- bit_idx = self._get_bit_from_zk_index(idx)
- if bit_idx < 0:
- return None
- try:
- # Create a node at path and return its integer value
- id_str = "%(#)010d" % {'#': idx}
- self._zookeeper_client.create_node(self._path + id_str, value)
- self._set_in_use(bit_idx)
- return idx
- except ResourceExistsError:
- self._set_in_use(bit_idx)
- return None
- # end reserve
-
- def delete(self, idx):
- id_str = "%(#)010d" % {'#': idx}
- self._zookeeper_client.delete_node(self._path + id_str)
- bit_idx = self._get_bit_from_zk_index(idx)
- if 0 <= bit_idx < self._in_use.length():
- self._in_use[bit_idx] = 0
- # end delete
-
- def read(self, idx):
- id_str = "%(#)010d" % {'#': idx}
- id_val = self._zookeeper_client.read_node(self._path+id_str)
- if id_val is not None:
- bit_idx = self._get_bit_from_zk_index(idx)
- if bit_idx >= 0:
- self._set_in_use(bit_idx)
- return id_val
- # end read
-
- def empty(self):
- return not self._in_use.any()
- # end empty
-
- @classmethod
- def delete_all(cls, zookeeper_client, path):
- try:
- zookeeper_client.delete_node(path, recursive=True)
- except kazoo.exceptions.NotEmptyError:
- #TODO: Add retries for NotEmptyError
- zookeeper_client.syslog("NotEmptyError while deleting %s" % path)
- # end delete_all
-
-#end class IndexAllocator
-
-
-class ZookeeperClient(object):
-
- def __init__(self, module, server_list, logging_fn=None):
- # logging
- logger = logging.getLogger(module)
- logger.setLevel(logging.INFO)
- try:
- handler = logging.handlers.RotatingFileHandler(LOG_DIR + module + '-zk.log', maxBytes=10*1024*1024, backupCount=5)
- except IOError:
- print "Cannot open log file in %s" %(LOG_DIR)
- else:
- log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
- handler.setFormatter(log_format)
- logger.addHandler(handler)
-
- if logging_fn:
- self.log = logging_fn
- else:
- self.log = self.syslog
-
- self._zk_client = \
- kazoo.client.KazooClient(
- server_list,
- timeout=400,
- handler=kazoo.handlers.gevent.SequentialGeventHandler(),
- logger=logger)
-
- self._zk_client.add_listener(self._zk_listener)
- self._logger = logger
- self._election = None
- self._server_list = server_list
- # KazooRetry to retry keeper CRUD operations
- self._retry = KazooRetry(max_tries=None, max_delay=300,
- sleep_func=gevent.sleep)
-
- self._conn_state = None
- self._sandesh_connection_info_update(status='INIT', message='')
- self._lost_cb = None
-
- self.connect()
- # end __init__
-
- # start
- def connect(self):
- while True:
- try:
- self._zk_client.start()
- break
- except gevent.event.Timeout as e:
- # Update connection info
- self._sandesh_connection_info_update(status='DOWN',
- message=str(e))
- gevent.sleep(1)
- # Zookeeper is also throwing exception due to delay in master election
- except Exception as e:
- # Update connection info
- self._sandesh_connection_info_update(status='DOWN',
- message=str(e))
- gevent.sleep(1)
- # Update connection info
- self._sandesh_connection_info_update(status='UP', message='')
-
- # end
-
- def is_connected(self):
- return self._zk_client.state == KazooState.CONNECTED
- # end is_connected
-
- def syslog(self, msg, *args, **kwargs):
- if not self._logger:
- return
- self._logger.info(msg)
- # end syslog
-
- def set_lost_cb(self, lost_cb=None):
- # set a callback to be called when kazoo state is lost
- # set to None for default action
- self._lost_cb = lost_cb
- # end set_lost_cb
-
- def _zk_listener(self, state):
- if state == KazooState.CONNECTED:
- if self._election:
- self._election.cancel()
- # Update connection info
- self._sandesh_connection_info_update(status='UP', message='')
- elif state == KazooState.LOST:
- # Lost the session with ZooKeeper Server
- # Best of option we have is to exit the process and restart all
- # over again
- if self._lost_cb:
- self._lost_cb()
- else:
- os._exit(2)
- elif state == KazooState.SUSPENDED:
- # Update connection info
- self._sandesh_connection_info_update(status='INIT',
- message = 'Connection to zookeeper lost. Retrying')
-
- # end
-
- def _zk_election_callback(self, func, *args, **kwargs):
- func(*args, **kwargs)
- # Exit if running master encounters error or exception
- exit(1)
- # end
-
- def master_election(self, path, identifier, func, *args, **kwargs):
- while True:
- self._election = self._zk_client.Election(path, identifier)
- self._election.run(self._zk_election_callback, func, *args, **kwargs)
- # end master_election
-
- def create_node(self, path, value=None):
- try:
- if value is None:
- value = uuid.uuid4()
- retry = self._retry.copy()
- retry(self._zk_client.create, path, str(value), makepath=True)
- except kazoo.exceptions.NodeExistsError:
- current_value = self.read_node(path)
- if current_value == value:
- return True;
- raise ResourceExistsError(path, str(current_value))
- # end create_node
-
- def delete_node(self, path, recursive=False):
- try:
- retry = self._retry.copy()
- retry(self._zk_client.delete, path, recursive=recursive)
- except kazoo.exceptions.NoNodeError:
- pass
- except Exception as e:
- raise e
- # end delete_node
-
- def read_node(self, path):
- try:
- retry = self._retry.copy()
- value = retry(self._zk_client.get, path)
- return value[0]
- except Exception:
- return None
- # end read_node
-
- def get_children(self, path):
- try:
- retry = self._retry.copy()
- return retry(self._zk_client.get_children, path)
- except Exception:
- return []
- # end read_node
-
- def _sandesh_connection_info_update(self, status, message):
- from pysandesh.connection_info import ConnectionState
- from pysandesh.gen_py.process_info.ttypes import ConnectionStatus, \
- ConnectionType
- from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
-
- new_conn_state = getattr(ConnectionStatus, status)
- ConnectionState.update(conn_type = ConnectionType.ZOOKEEPER,
- name = 'Zookeeper', status = new_conn_state,
- message = message,
- server_addrs = self._server_list.split(','))
-
- if (self._conn_state and self._conn_state != ConnectionStatus.DOWN and
- new_conn_state == ConnectionStatus.DOWN):
- msg = 'Connection to Zookeeper down: %s' %(message)
- self.log(msg, level=SandeshLevel.SYS_ERR)
- if (self._conn_state and self._conn_state != new_conn_state and
- new_conn_state == ConnectionStatus.UP):
- msg = 'Connection to Zookeeper ESTABLISHED'
- self.log(msg, level=SandeshLevel.SYS_NOTICE)
-
- self._conn_state = new_conn_state
- # end _sandesh_connection_info_update
-
-# end class ZookeeperClient
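
IndexAllocator above maps a flat bit position in its bitarray onto an id inside one of several start/end allocation blocks and back again. A standalone sketch of that translation (forward order only; the bitarray and ZooKeeper parts are omitted):

# Standalone sketch of the bit-position <-> id translation in IndexAllocator.
def bit_to_id(bit, alloc_list):
    remaining = bit
    for block in alloc_list:
        span = block['end'] - block['start'] + 1
        if remaining < span:
            return block['start'] + remaining
        remaining -= span
    raise IndexError('bit %d is outside the allocation ranges' % bit)

def id_to_bit(idx, alloc_list):
    offset = 0
    for block in alloc_list:
        if block['start'] <= idx <= block['end']:
            return offset + (idx - block['start'])
        offset += block['end'] - block['start'] + 1
    return -1

ranges = [{'start': 100, 'end': 109}, {'start': 500, 'end': 509}]
assert bit_to_id(12, ranges) == 502
assert id_to_bit(502, ranges) == 12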
diff --git a/Testcases/cfgm_common/zkclient.pyc b/Testcases/cfgm_common/zkclient.pyc
deleted file mode 100644
index c18a227..0000000
--- a/Testcases/cfgm_common/zkclient.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/cleanup b/Testcases/cleanup
deleted file mode 100755
index c23d556..0000000
--- a/Testcases/cleanup
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-
-if [ $1 == "v4" ]
-then
- echo "Cleaning v4"
-else
- echo "Cleaning v6"
-fi
-
diff --git a/Testcases/config b/Testcases/config
deleted file mode 100755
index bdbd1e1..0000000
--- a/Testcases/config
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/python
-
-import sys
-import os
-from config_shell import *
-default_client_args = [
- ('--username', 'admin'),
- ('--password', os.environ["OS_PASSWORD"]),
- ('--region', 'RegionOne'),
- ('--tenant', os.environ["OS_TENANT_NAME"]),
- ('--api-server', '10.10.11.16')]
-
-
-if __name__ == '__main__':
- for arg in default_client_args:
- if not arg[0] in sys.argv:
- sys.argv.insert(1, arg[0])
- sys.argv.insert(2, arg[1])
- ConfigShell().main()
diff --git a/Testcases/config.16 b/Testcases/config.16
deleted file mode 100755
index bdbd1e1..0000000
--- a/Testcases/config.16
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/python
-
-import sys
-import os
-from config_shell import *
-default_client_args = [
- ('--username', 'admin'),
- ('--password', os.environ["OS_PASSWORD"]),
- ('--region', 'RegionOne'),
- ('--tenant', os.environ["OS_TENANT_NAME"]),
- ('--api-server', '10.10.11.16')]
-
-
-if __name__ == '__main__':
- for arg in default_client_args:
- if not arg[0] in sys.argv:
- sys.argv.insert(1, arg[0])
- sys.argv.insert(2, arg[1])
- ConfigShell().main()
diff --git a/Testcases/config.int b/Testcases/config.int
deleted file mode 100755
index 9911363..0000000
--- a/Testcases/config.int
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/python
-
-import sys
-import os
-from config_shell import *
-default_client_args = [
- ('--username', 'admin'),
- ('--password', 'openstack'),
- ('--region', 'RegionOne'),
- ('--tenant', 'admin'),
- ('--api-server', '172.16.50.199')]
-
-
-if __name__ == '__main__':
- for arg in default_client_args:
- if not arg[0] in sys.argv:
- sys.argv.insert(1, arg[0])
- sys.argv.insert(2, arg[1])
- ConfigShell().main()
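The three wrapper scripts above (config, config.16, config.int) differ only in their hard-coded defaults; each prepends any missing flag/value pair to sys.argv before handing control to ConfigShell. A self-contained sketch of that injection pattern (the defaults below are placeholders, not the original credentials or endpoints):

import sys

DEFAULT_ARGS = [
    ('--username', 'admin'),
    ('--region', 'RegionOne'),
]

def inject_defaults(argv, defaults):
    # Insert each missing flag right after the program name so values
    # given explicitly on the command line still win over the defaults.
    for flag, value in defaults:
        if flag not in argv:
            argv.insert(1, flag)
            argv.insert(2, value)
    return argv

if __name__ == '__main__':
    print(inject_defaults(list(sys.argv), DEFAULT_ARGS))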
diff --git a/Testcases/config_obj.py b/Testcases/config_obj.py
deleted file mode 100644
index 9470588..0000000
--- a/Testcases/config_obj.py
+++ /dev/null
@@ -1,1737 +0,0 @@
-
-import os
-import sys
-import time
-import uuid
-from vnc_api import vnc_api
-try:
- import novaclient.v1_1.client
- config_nova = True
-except:
- config_nova = False
-
-
-class ConfigVirtualDns():
- def __init__(self, client):
- self.vnc = client.vnc
- self.tenant = client.tenant
-
- def obj_list(self):
- list = self.vnc.virtual_DNSs_list()['virtual-DNSs']
- return list
-
- def obj_get(self, name):
- for item in self.obj_list():
- if (item['fq_name'][1] == name):
- return self.vnc.virtual_DNS_read(id = item['uuid'])
-
- def obj_show(self, obj):
- print 'Virtual DNS'
- print 'Name: %s' %(obj.get_fq_name())
- print 'UUID: %s' %(obj.uuid)
- dns = obj.get_virtual_DNS_data()
- print 'Domain name: %s' %(dns.domain_name)
- print 'Record order: %s' %(dns.record_order)
- print 'Default TTL: %s seconds' %(dns.default_ttl_seconds)
- print 'Next DNS: %s' %(dns.next_virtual_DNS)
-
- def show(self, name = None):
- if name:
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- self.obj_show(obj)
- else:
- for item in self.obj_list():
- print ' %s' %(item['fq_name'][1])
-
- def add(self, name, domain_name, record_order, next_dns):
- data = vnc_api.VirtualDnsType(domain_name = domain_name,
- dynamic_records_from_client = True,
- record_order = record_order,
- default_ttl_seconds = 86400,
- next_virtual_DNS = 'default-domain:' + next_dns)
- obj = vnc_api.VirtualDns(name = name, virtual_DNS_data = data)
- try:
- self.vnc.virtual_DNS_create(obj)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
-
- def delete(self, name):
- try:
- self.vnc.virtual_DNS_delete(
- fq_name = ['default-domain', name])
- except Exception as e:
- print 'ERROR: %s' %(str(e))
-
-
-class ConfigIpam():
- def __init__(self, client):
- self.vnc = client.vnc
- self.tenant = client.tenant
-
- def obj_list(self):
- list = self.vnc.network_ipams_list()['network-ipams']
- return list
-
- def obj_get(self, name):
- for item in self.obj_list():
- if (item['fq_name'][1] == self.tenant.name) and \
- (item['fq_name'][2] == name):
- return self.vnc.network_ipam_read(id = item['uuid'])
-
- def dns_show(self, mgmt):
- print ' DNS Type: %s' %(mgmt.ipam_dns_method)
- if (mgmt.ipam_dns_method == 'virtual-dns-server'):
- print ' Virtual DNS Server: %s' %(
- mgmt.get_ipam_dns_server().virtual_dns_server_name)
- elif (mgmt.ipam_dns_method == 'tenant-dns-server'):
- list = mgmt.get_ipam_dns_server().get_tenant_dns_server_address().get_ip_address()
- print ' Tenant DNS Server:'
- for item in list:
- print ' %s' %(item)
-
- def dhcp_show(self, mgmt):
- dhcp_opt = {'4':'NTP Server', '15':'Domain Name'}
- print ' DHCP Options:'
- dhcp = mgmt.get_dhcp_option_list()
- if not dhcp:
- return
- for item in dhcp.get_dhcp_option():
- print ' %s: %s' %(dhcp_opt[item.dhcp_option_name],
- item.dhcp_option_value)
-
- def obj_show(self, obj):
- print 'IPAM'
- print 'Name: %s' %(obj.get_fq_name())
- print 'UUID: %s' %(obj.uuid)
- print 'Management:'
- mgmt = obj.get_network_ipam_mgmt()
- if not mgmt:
- return
- self.dns_show(mgmt)
- self.dhcp_show(mgmt)
-
- def show(self, name = None):
- if name:
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- self.obj_show(obj)
- else:
- for item in self.obj_list():
- if (item['fq_name'][1] == self.tenant.name):
- print ' %s' %(item['fq_name'][2])
-
- def dns_add(self, mgmt, dns_type, virtual_dns = None, tenant_dns = None):
- type = {'none':'none',
- 'default':'default-dns-server',
- 'virtual':'virtual-dns-server',
- 'tenant':'tenant-dns-server'}
- if not dns_type:
- return
- mgmt.set_ipam_dns_method(type[dns_type])
- if virtual_dns:
- mgmt.set_ipam_dns_server(vnc_api.IpamDnsAddressType(
- virtual_dns_server_name = virtual_dns))
- if tenant_dns:
- mgmt.set_ipam_dns_server(vnc_api.IpamDnsAddressType(
- tenant_dns_server_address = vnc_api.IpAddressesType(
- ip_address = tenant_dns)))
-
- def dhcp_add(self, mgmt, domain_name = None, ntp_server = None):
- if domain_name:
- list = mgmt.get_dhcp_option_list()
- if not list:
- list = vnc_api.DhcpOptionsListType()
- mgmt.set_dhcp_option_list(list)
- list.add_dhcp_option(vnc_api.DhcpOptionType(
- dhcp_option_name = '15',
- dhcp_option_value = domain_name))
- if ntp_server:
- list = mgmt.get_dhcp_option_list()
- if not list:
- list = vnc_api.DhcpOptionsListType()
-                mgmt.set_dhcp_option_list(list)
- list.add_dhcp_option(vnc_api.DhcpOptionType(
- dhcp_option_name = '4',
- dhcp_option_value = ntp_server))
-
- def add(self, name, dns_type, virtual_dns = None, tenant_dns = None,
- domain_name = None, ntp_server = None):
- create = False
- obj = self.obj_get(name)
- if not obj:
- obj = vnc_api.NetworkIpam(name = name,
- parent_obj = self.tenant)
- create = True
- mgmt = obj.get_network_ipam_mgmt()
- if not mgmt:
- mgmt = vnc_api.IpamType()
- obj.set_network_ipam_mgmt(mgmt)
- self.dns_add(mgmt, dns_type, virtual_dns, tenant_dns)
- self.dhcp_add(mgmt, domain_name, ntp_server)
- if create:
- try:
- self.vnc.network_ipam_create(obj)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- else:
- self.vnc.network_ipam_update(obj)
-
- def delete(self, name, domain_name = None):
- update = False
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- if domain_name:
- mgmt = obj.get_network_ipam_mgmt()
- list = mgmt.get_dhcp_option_list()
- for item in list.get_dhcp_option():
- if (item.dhcp_option_name == '15') and \
- (item.dhcp_option_value == domain_name):
- list.delete_dhcp_option(item)
- break
- update = True
- if update:
- self.vnc.network_ipam_update(obj)
- else:
- try:
- self.vnc.network_ipam_delete(
- fq_name = ['default-domain', self.tenant.name,
- name])
- except Exception as e:
- print 'ERROR: %s' %(str(e))
-
-
-class ConfigPolicy():
- def __init__(self, client):
- self.vnc = client.vnc
- self.tenant = client.tenant
-
- def obj_list(self):
- list = self.vnc.network_policys_list()['network-policys']
- return list
-
- def obj_get(self, name):
- for item in self.obj_list():
- if (item['fq_name'][1] == self.tenant.name) and \
- (item['fq_name'][2] == name):
- return self.vnc.network_policy_read(id = item['uuid'])
-
- def addr_show(self, addr_list):
- for item in addr_list:
- print ' Virtual Network: %s' %(item.get_virtual_network())
-
- def port_show(self, port_list):
- for item in port_list:
- print ' %d:%d' %(item.get_start_port(), item.get_end_port())
-
- def action_show(self, rule):
- list = rule.get_action_list()
- if not list:
- return
- action = list.get_simple_action()
- if action:
- print ' %s' %(action)
- else:
- for item in rule.get_action_list().get_apply_service():
- print ' %s' %(item)
-
- def rule_show(self, obj):
- rules_obj = obj.get_network_policy_entries()
- if (rules_obj == None):
- return
- list = rules_obj.get_policy_rule()
- count = 1
- for rule in list:
- print 'Rule #%d' %(count)
- print ' Direction: %s' %(rule.get_direction())
- print ' Protocol: %s' %(rule.get_protocol())
- print ' Source Addresses:'
- self.addr_show(rule.get_src_addresses())
- print ' Source Ports:'
- self.port_show(rule.get_src_ports())
- print ' Destination Addresses:'
- self.addr_show(rule.get_dst_addresses())
- print ' Destination Ports:'
- self.port_show(rule.get_dst_ports())
- print ' Action:'
- self.action_show(rule)
- count += 1
-
- def obj_show(self, obj):
- print 'Policy'
- print 'Name: %s' %(obj.get_fq_name())
- print 'UUID: %s' %(obj.uuid)
- self.rule_show(obj)
- list = obj.get_virtual_network_back_refs()
- if (list != None):
- print '[BR] network:'
- for item in list:
- print ' %s' %(item['to'][2])
-
- def show(self, name):
- if name:
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- self.obj_show(obj)
- else:
- for item in self.obj_list():
- if (item['fq_name'][1] == self.tenant.name):
- print ' %s' %(item['fq_name'][2])
-
- def rule_add(self, arg_list):
- direction = None
- protocol = None
- src_net_list = []
- dst_net_list = []
- src_port_list = []
- dst_port_list = []
- action = None
- service_list = []
- for arg in arg_list:
- arg_name = arg.split('=')[0]
- arg_val = arg.split('=')[1]
- if (arg_name == 'direction'):
- direction = arg_val
- elif (arg_name == 'protocol'):
- protocol = arg_val
- elif (arg_name == 'src-net'):
- net = 'default-domain:%s:%s' %(self.tenant.name, arg_val)
- src_net_list.append(vnc_api.AddressType(virtual_network = net))
- elif (arg_name == 'dst-net'):
- net = 'default-domain:%s:%s' %(self.tenant.name, arg_val)
- dst_net_list.append(vnc_api.AddressType(virtual_network = net))
- elif (arg_name == 'src-port'):
- if (arg_val == 'any'):
- src_port_list.append(vnc_api.PortType(
- start_port = -1, end_port = -1))
- else:
- s_e = arg_val.split(':')
- src_port_list.append(vnc_api.PortType(
- start_port = int(s_e[0]), end_port = int(s_e[1])))
-            elif (arg_name == 'dst-port'):
-                if (arg_val == 'any'):
-                    dst_port_list.append(vnc_api.PortType(
-                        start_port = -1, end_port = -1))
-                else:
-                    s_e = arg_val.split(':')
-                    dst_port_list.append(vnc_api.PortType(
-                        start_port = int(s_e[0]), end_port = int(s_e[1])))
- elif (arg_name == 'action'):
- action = arg_val
- elif (arg_name == 'service'):
- service_list.append('default-domain:%s:%s' \
- %(self.tenant.name, arg_val))
-
- rule = vnc_api.PolicyRuleType()
- if not direction:
- direction = '<>'
- rule.set_direction(direction)
- if not protocol:
- protocol = 'any'
- rule.set_protocol(protocol)
- if not src_net_list:
- src_net_list.append(vnc_api.AddressType(virtual_network = 'any'))
- rule.set_src_addresses(src_net_list)
- if not dst_net_list:
- dst_net_list.append(vnc_api.AddressType(virtual_network = 'any'))
- rule.set_dst_addresses(dst_net_list)
- if not src_port_list:
- src_port_list.append(vnc_api.PortType(
- start_port = -1, end_port = -1))
- rule.set_src_ports(src_port_list)
- if not dst_port_list:
- dst_port_list.append(vnc_api.PortType(
- start_port = -1, end_port = -1))
- rule.set_dst_ports(dst_port_list)
- if not action:
- action_list = vnc_api.ActionListType(simple_action = 'pass')
- elif (action == 'service'):
- action_list = vnc_api.ActionListType(apply_service = service_list)
- else:
- action_list = vnc_api.ActionListType(simple_action = action)
- rule.set_action_list(action_list)
- return rule
-
- def add(self, name, rule_arg_list):
- rule_list = []
- if not rule_arg_list:
- rule = self.rule_add([])
- rule_list.append(rule)
- else:
- for rule_arg in rule_arg_list:
- rule = self.rule_add(rule_arg.split(','))
- rule_list.append(rule)
-
- obj = self.obj_get(name = name)
- if obj:
- rules = obj.get_network_policy_entries()
- if not rules:
- rules = vnc_api.PolicyEntriesType(policy_rule = rule_list)
- else:
- for item in rule_list:
- rules.add_policy_rule(item)
- obj.set_network_policy_entries(rules)
- try:
- self.vnc.network_policy_update(obj)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- else:
- rules = vnc_api.PolicyEntriesType(policy_rule = rule_list)
- obj = vnc_api.NetworkPolicy(name = name,
- parent_obj = self.tenant,
- network_policy_entries = rules)
- try:
- self.vnc.network_policy_create(obj)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
-
- def delete(self, name, rule_arg_list):
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- if rule_arg_list:
- rules = obj.get_network_policy_entries()
- if not rules:
- return
- for rule_arg in rule_arg_list:
- for arg in rule_arg.split(','):
- arg_name = arg.split('=')[0]
- arg_val = arg.split('=')[1]
- if (arg_name == 'index'):
- rule = rules.get_policy_rule()[int(arg_val) - 1]
- rules.delete_policy_rule(rule)
- obj.set_network_policy_entries(rules)
- self.vnc.network_policy_update(obj)
- else:
- try:
- self.vnc.network_policy_delete(fq_name = ['default-domain',
- self.tenant.name, name])
- except Exception as e:
- print 'ERROR: %s' %(str(e))
-
-
-class ConfigSecurityGroup():
- def __init__(self, client):
- self.vnc = client.vnc
- self.tenant = client.tenant
-
- def obj_list(self):
- list = self.vnc.security_groups_list()['security-groups']
- return list
-
- def obj_get(self, name):
- for item in self.obj_list():
- if (item['fq_name'][1] == self.tenant.name) and \
- (item['fq_name'][2] == name):
- return self.vnc.security_group_read(id = item['uuid'])
-
- def addr_show(self, addr_list):
- for item in addr_list:
- print ' Security Group: %s' %(item.get_security_group())
- subnet = item.get_subnet()
- if subnet:
- print ' Subnet: %s/%d' %(subnet.get_ip_prefix(), \
- subnet.get_ip_prefix_len())
- else:
- print ' Subnet: None'
-
- def port_show(self, port_list):
- for item in port_list:
- print ' %d:%d' %(item.get_start_port(), item.get_end_port())
-
- def rule_show(self, obj):
- rules_obj = obj.get_security_group_entries()
- if (rules_obj == None):
- return
- list = rules_obj.get_policy_rule()
- count = 1
- for rule in list:
- print 'Rule #%d' %(count)
- print ' Direction: %s' %(rule.get_direction())
- print ' Protocol: %s' %(rule.get_protocol())
- print ' Source Addresses:'
- self.addr_show(rule.get_src_addresses())
- print ' Source Ports:'
- self.port_show(rule.get_src_ports())
- print ' Destination Addresses:'
- self.addr_show(rule.get_dst_addresses())
- print ' Destination Ports:'
- self.port_show(rule.get_dst_ports())
- count += 1
-
- def obj_show(self, obj):
- print 'Security Group'
- print 'Name: %s' %(obj.get_fq_name())
- print 'UUID: %s' %(obj.uuid)
- self.rule_show(obj)
-
- def show(self, name):
- if name:
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- self.obj_show(obj)
- else:
- for item in self.obj_list():
- if (item['fq_name'][1] == self.tenant.name):
- print ' %s' %(item['fq_name'][2])
-
- def add(self, name, protocol = None, address = None, port = None,
- direction = None):
- rule = vnc_api.PolicyRuleType()
- rule.set_direction('>')
- if protocol:
- rule.set_protocol(protocol)
- else:
- rule.set_protocol('any')
-
- addr_list = []
- if address:
- for item in address:
- prefix = item.split('/')[0]
- len = item.split('/')[1]
- addr_list.append(vnc_api.AddressType(
- subnet = vnc_api.SubnetType(
- ip_prefix = prefix, ip_prefix_len = int(len))))
- else:
- addr_list.append(vnc_api.AddressType(
- subnet = vnc_api.SubnetType(
- ip_prefix = '0.0.0.0', ip_prefix_len = 0)))
-
- local_addr_list = [vnc_api.AddressType(security_group = 'local')]
-
- port_list = []
- if port:
- for item in port:
- if (item == 'any'):
- port_list.append(vnc_api.PortType(
- start_port = -1, end_port = -1))
- else:
- s_e = item.split(':')
- port_list.append(vnc_api.PortType(
- start_port = int(s_e[0]), end_port = int(s_e[1])))
- else:
- port_list.append(vnc_api.PortType(start_port = -1, end_port = -1))
-
- local_port_list = [vnc_api.PortType(start_port = -1, end_port = -1)]
-
- if (direction == 'ingress'):
- rule.set_src_addresses(addr_list)
- rule.set_src_ports(port_list)
- rule.set_dst_addresses(local_addr_list)
- rule.set_dst_ports(local_port_list)
- else:
- rule.set_src_addresses(local_addr_list)
- rule.set_src_ports(local_port_list)
- rule.set_dst_addresses(addr_list)
- rule.set_dst_ports(port_list)
-
- obj = self.obj_get(name = name)
- if obj:
- rules = obj.get_security_group_entries()
- if not rules:
- rules = vnc_api.PolicyEntriesType(policy_rule = [rule])
- else:
- rules.add_policy_rule(rule)
- try:
- self.vnc.security_group_update(obj)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- else:
- rules = vnc_api.PolicyEntriesType(policy_rule = [rule])
- obj = vnc_api.SecurityGroup(name = name,
- parent_obj = self.tenant,
- security_group_entries = rules)
- try:
- self.vnc.security_group_create(obj)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
-
- def rule_del(self, obj, index):
- rules = obj.get_security_group_entries()
- if not rules:
- return
- rule = rules.get_policy_rule()[index - 1]
- rules.delete_policy_rule(rule)
- self.vnc.security_group_update(obj)
-
- def delete(self, name, rule = None):
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- if rule:
- self.rule_del(obj, int(rule))
- else:
- try:
- self.vnc.security_group_delete(fq_name = ['default-domain',
- self.tenant.name, name])
- except Exception as e:
- print 'ERROR: %s' %(str(e))
-
-
-class ConfigNetwork():
- def __init__(self, client):
- self.vnc = client.vnc
- self.tenant = client.tenant
-
- def obj_list(self):
- list = self.vnc.virtual_networks_list()['virtual-networks']
- return list
-
- def obj_get(self, name):
- for item in self.obj_list():
- if (item['fq_name'][1] == self.tenant.name) and \
- (item['fq_name'][2] == name):
- return self.vnc.virtual_network_read(id = item['uuid'])
-
- def prop_route_target_show(self, obj):
- print '[P] Route targets:'
- rt_list = obj.get_route_target_list()
- if not rt_list:
- return
- for rt in rt_list.get_route_target():
- print ' %s' %(rt)
-
- def child_floating_ip_pool_show(self, obj):
- print '[C] Floating IP pools:'
- pool_list = obj.get_floating_ip_pools()
- if not pool_list:
- return
- for pool in pool_list:
- print ' %s' %(pool['to'][3])
- pool_obj = self.vnc.floating_ip_pool_read(id = pool['uuid'])
- ip_list = pool_obj.get_floating_ips()
- if (ip_list != None):
- for ip in ip_list:
- ip_obj = self.vnc.floating_ip_read(id = ip['uuid'])
- print ' %s' %(ip_obj.get_floating_ip_address())
-
- def ref_ipam_show(self, obj):
- print '[R] IPAMs:'
- ipam_list = obj.get_network_ipam_refs()
- if not ipam_list:
- return
- for item in ipam_list:
- print ' %s' %(item['to'][2])
- subnet_list = item['attr'].get_ipam_subnets()
- for subnet in subnet_list:
- print ' subnet: %s/%d, gateway: %s' %(
- subnet.get_subnet().get_ip_prefix(),
- subnet.get_subnet().get_ip_prefix_len(),
- subnet.get_default_gateway())
-
- def ref_policy_show(self, obj):
- print '[R] Policies:'
- policy_list = obj.get_network_policy_refs()
- if not policy_list:
- return
- for item in policy_list:
- print ' %s (%d.%d)' %(item['to'][2],
- item['attr'].get_sequence().get_major(),
- item['attr'].get_sequence().get_minor())
-
- def ref_route_table_show(self, obj):
- print '[R] Route Tables:'
- rt_list = obj.get_route_table_refs()
- if not rt_list:
- return
- for item in rt_list:
- print ' %s' %(item['to'][2])
-
- def obj_show(self, obj):
- print 'Virtual Network'
- print 'Name: %s' %(obj.get_fq_name())
- print 'UUID: %s' %(obj.uuid)
- self.prop_route_target_show(obj)
- self.child_floating_ip_pool_show(obj)
- self.ref_ipam_show(obj)
- self.ref_policy_show(obj)
- self.ref_route_table_show(obj)
-
- def show(self, name):
- if name:
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- self.obj_show(obj)
- else:
- for item in self.obj_list():
- if (item['fq_name'][1] == self.tenant.name):
- print ' %s' %(item['fq_name'][2])
-
- def ipam_add(self, obj, name, subnet, gateway = None):
- try:
- ipam_obj = self.vnc.network_ipam_read(fq_name = ['default-domain',
- self.tenant.name, name])
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- return
- cidr = subnet.split('/')
- subnet = vnc_api.SubnetType(ip_prefix = cidr[0],
- ip_prefix_len = int(cidr[1]))
- ipam_subnet = vnc_api.IpamSubnetType(subnet = subnet,
- default_gateway = gateway)
- obj.add_network_ipam(ref_obj = ipam_obj,
- ref_data = vnc_api.VnSubnetsType([ipam_subnet]))
-
- def ipam_del(self, obj, name):
- try:
- ipam_obj = self.vnc.network_ipam_read(fq_name = ['default-domain',
- self.tenant.name, name])
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- return
- obj.del_network_ipam(ref_obj = ipam_obj)
-
- def policy_add(self, obj, name):
- try:
- policy_obj = self.vnc.network_policy_read(
- fq_name = ['default-domain', self.tenant.name, name])
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- return
- seq = vnc_api.SequenceType(major = 0, minor = 0)
- obj.add_network_policy(ref_obj = policy_obj,
- ref_data = vnc_api.VirtualNetworkPolicyType(sequence = seq))
-
- def policy_del(self, obj, name):
- try:
- policy_obj = self.vnc.network_policy_read(
- fq_name = ['default-domain', self.tenant.name, name])
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- return
- obj.del_network_policy(ref_obj = policy_obj)
-
- def route_target_add(self, obj, rt):
- rt_list = obj.get_route_target_list()
- if not rt_list:
- rt_list = vnc_api.RouteTargetList()
- obj.set_route_target_list(rt_list)
- rt_list.add_route_target('target:%s' %(rt))
-
- def route_target_del(self, obj, rt):
- rt_list = obj.get_route_target_list()
- if not rt_list:
- return
- rt_list.delete_route_target('target:%s' %(rt))
-
- def route_table_add(self, obj, rt):
- try:
- rt_obj = self.vnc.route_table_read(fq_name = ['default-domain',
- self.tenant.name, rt])
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- return
- obj.add_route_table(ref_obj = rt_obj)
-
- def route_table_del(self, obj, rt):
- try:
- rt_obj = self.vnc.route_table_read(fq_name = ['default-domain',
- self.tenant.name, rt])
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- return
- obj.del_route_table(ref_obj = rt_obj)
-
- def add(self, name, ipam = None, subnet = None, policy = None,
- route_target = None, route_table = None, shared = None,
- external = None, l2 = None):
- create = False
- obj = self.obj_get(name)
- if not obj:
- obj = vnc_api.VirtualNetwork(name = name,
- parent_obj = self.tenant)
- if l2:
- prop = vnc_api.VirtualNetworkType(forwarding_mode = 'l2')
- obj.set_virtual_network_properties(prop)
- if shared:
- obj.set_is_shared(shared)
- if external:
- obj.set_router_external(external)
- create = True
- if ipam and subnet:
- self.ipam_add(obj, ipam, subnet)
- if policy:
- self.policy_add(obj, policy)
- if route_target:
- self.route_target_add(obj, route_target)
- if route_table:
- self.route_table_add(obj, route_table)
- if create:
- try:
- self.vnc.virtual_network_create(obj)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- else:
- self.vnc.virtual_network_update(obj)
-
- def delete(self, name, ipam = None, policy = None, route_target = None,
- route_table = None):
- update = False
- obj = self.obj_get(name)
- if not obj:
-            print 'ERROR: Object %s is not found!' %(name)
-            return
- if ipam:
- self.ipam_del(obj, ipam)
- update = True
- if policy:
- self.policy_del(obj, policy)
- update = True
- if route_target:
- self.route_target_del(obj, route_target)
- update = True
- if route_table:
- self.route_table_del(obj, route_table)
- update = True
- if update:
- self.vnc.virtual_network_update(obj)
- else:
- try:
- self.vnc.virtual_network_delete(id = obj.uuid)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
-
-
-class ConfigFloatingIpPool():
- def __init__(self, client):
- self.vnc = client.vnc
- self.tenant = client.tenant
-
- def obj_list(self):
- list = self.vnc.floating_ip_pools_list()['floating-ip-pools']
- return list
-
- def obj_get(self, name, network = None):
- for item in self.obj_list():
- if network:
- if (item['fq_name'][1] == self.tenant.name) and \
- (item['fq_name'][2] == network) and \
- (item['fq_name'][3] == name):
- return self.vnc.floating_ip_pool_read(id = item['uuid'])
- else:
- if (item['fq_name'][1] == self.tenant.name) and \
- (item['fq_name'][3] == name):
- return self.vnc.floating_ip_pool_read(id = item['uuid'])
-
- def prop_subnet_show(self, obj):
- print '[P] Subnet:'
- prefixes = obj.get_floating_ip_pool_prefixes()
- if not prefixes:
- return
- for item in prefixes.get_subnet():
- print ' %s/%s' %(item.get_ip_prefix(), item.get_ip_prefix_len())
-
- def child_ip_show(self, obj):
- print '[C] Floating IPs:'
- list = obj.get_floating_ips()
- if not list:
- return
- for ip in list:
- ip_obj = self.vnc.floating_ip_read(id = ip['uuid'])
- print ' %s' %(ip_obj.get_floating_ip_address())
-
- def back_ref_tenant_show(self, obj):
- print '[BR] Tenants:'
- list = obj.get_project_back_refs()
- if not list:
- return
- for item in list:
- print ' %s' %(item['to'][1])
-
- def obj_show(self, obj):
- print 'Floating IP Pool'
- print 'Name: %s' %(obj.get_fq_name())
- print 'UUID: %s' %(obj.uuid)
- self.prop_subnet_show(obj)
- self.child_ip_show(obj)
- self.back_ref_tenant_show(obj)
-
- def show(self, name = None):
- if name:
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- self.obj_show(obj)
- else:
- for item in self.obj_list():
- if (item['fq_name'][1] == self.tenant.name):
- print ' %s in network %s' \
- %(item['fq_name'][2], item['fq_name'][3])
-
- def add(self, name, network):
- if not name:
- print 'ERROR: The name of floating IP pool is not specified!'
- return
- if not network:
- print 'ERROR: Network is not specified!'
- return
- try:
- net_obj = self.vnc.virtual_network_read(
- fq_name = ['default-domain', self.tenant.name, network])
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- return
- obj = vnc_api.FloatingIpPool(name = name, parent_obj = net_obj)
- try:
- self.vnc.floating_ip_pool_create(obj)
- self.tenant.add_floating_ip_pool(obj)
- self.vnc.project_update(self.tenant)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
-
- def fip_delete(self, pool_obj):
- pass
-
- def delete(self, name, network):
- if not name:
- print 'ERROR: The name of floating IP pool is not specified!'
- return
- obj = self.obj_get(name, network)
- if not obj:
- print 'ERROR: Floating IP pool %s in network %s is not found!' \
- %(name, network)
- return
- if obj.get_floating_ips():
- print 'ERROR: There are allocated floating IPs!'
- return
- for tenant_ref in obj.get_project_back_refs():
- tenant = self.vnc.project_read(fq_name = tenant_ref['to'])
- tenant.del_floating_ip_pool(obj)
- self.vnc.project_update(tenant)
- try:
- self.vnc.floating_ip_pool_delete(id = obj.uuid)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
-
-
-class ConfigServiceTemplate():
- def __init__(self, client):
- self.vnc = client.vnc
- self.tenant = client.tenant
-
- def obj_list(self):
- list = self.vnc.service_templates_list()['service-templates']
- return list
-
- def obj_get(self, name):
- for item in self.obj_list():
- if (item['fq_name'][1] == name):
- return self.vnc.service_template_read(id = item['uuid'])
-
- def obj_show(self, obj):
- print 'Service Template'
- print 'Name: %s' %(obj.get_fq_name())
- print 'UUID: %s' %(obj.uuid)
- properties = obj.get_service_template_properties()
- print 'Service Mode: %s' %(properties.get_service_mode())
- print 'Service Type: %s' %(properties.get_service_type())
- print 'Service Image: %s' %(properties.get_image_name())
- print 'Service Flavor: %s' %(properties.get_flavor())
- print 'Service Interfaces:'
- for item in properties.get_interface_type():
- print ' %s' %(item.get_service_interface_type())
-
- def show(self, name = None):
- if name:
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- self.obj_show(obj)
- else:
- for item in self.obj_list():
- print ' %s' %(item['fq_name'][1])
-
- def add(self, name, mode, type, image, flavor, interface_type,
- scale = None):
- obj = vnc_api.ServiceTemplate(name = name)
- properties = vnc_api.ServiceTemplateType(service_mode = mode,
- service_type = type, image_name = image, flavor = flavor,
- ordered_interfaces = True, availability_zone_enable = True)
- if scale:
- properties.set_service_scaling(scale)
- for item in interface_type:
- if (mode == 'transparent') and \
- ((item == 'left') or (item == 'right')):
- shared_ip = True
- elif (mode == 'in-network') and (item == 'left'):
- shared_ip = True
- else:
- shared_ip = False
- type = vnc_api.ServiceTemplateInterfaceType(
- service_interface_type = item,
- shared_ip = shared_ip,
- static_route_enable = True)
- properties.add_interface_type(type)
- else:
- for item in interface_type:
- type = vnc_api.ServiceTemplateInterfaceType(
- service_interface_type = item,
- static_route_enable = True)
- properties.add_interface_type(type)
- obj.set_service_template_properties(properties)
- try:
- self.vnc.service_template_create(obj)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
-
- def delete(self, name):
- obj = self.obj_get(name)
- if not obj:
-            print 'ERROR: Object %s is not found!' %(name)
-            return
- try:
- self.vnc.service_template_delete(id = obj.uuid)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
-
-
-class ConfigServiceInstance():
- def __init__(self, client):
- self.vnc = client.vnc
- self.tenant = client.tenant
-
- def obj_list(self):
- list = self.vnc.service_instances_list()['service-instances']
- return list
-
- def obj_get(self, name):
- for item in self.obj_list():
- if (item['fq_name'][1] == self.tenant.name) and \
- (item['fq_name'][2] == name):
- return self.vnc.service_instance_read(id = item['uuid'])
-
- def obj_show(self, obj):
- print 'Service Instance'
- print 'Name: %s' %(obj.get_fq_name())
- print 'UUID: %s' %(obj.uuid)
-
- def show(self, name):
- if name:
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- self.obj_show(obj)
- else:
- for item in self.obj_list():
- if (item['fq_name'][1] == self.tenant.name):
- print ' %s' %(item['fq_name'][2])
-
- def add(self, name, template, network_list,
- auto_policy = None, scale_max = None):
- obj = vnc_api.ServiceInstance(name = name, parent_obj = self.tenant)
- properties = vnc_api.ServiceInstanceType(auto_policy = auto_policy)
- for net in network_list:
- net_name = None
- net_route = None
- net_auto = False
- tenant_name = self.tenant.name
- for arg in net.split(','):
- arg_name = arg.split('=')[0]
- arg_val = arg.split('=')[1]
- if (arg_name == 'tenant'):
- tenant_name = arg_val
- elif (arg_name == 'network'):
- if (arg_val == 'auto'):
- net_auto = True
- else:
- net_name = arg_val
- elif (arg_name == 'route'):
- net_route = arg_val
- if net_auto:
- net_fq_name = None
- else:
- net_fq_name = 'default-domain:%s:%s' %(tenant_name, net_name)
- interface = vnc_api.ServiceInstanceInterfaceType(
- virtual_network = net_fq_name)
- if net_route:
- route = vnc_api.RouteType(prefix = net_route)
- route_table = vnc_api.RouteTableType()
- route_table.add_route(route)
- interface.set_static_routes(route_table)
- properties.add_interface_list(interface)
-
- if scale_max:
- scale = vnc_api.ServiceScaleOutType(
- max_instances = int(scale_max),
- auto_scale = True)
- else:
- scale = vnc_api.ServiceScaleOutType()
- properties.set_scale_out(scale)
-
- obj.set_service_instance_properties(properties)
- try:
- template = self.vnc.service_template_read(
- fq_name = ['default-domain', template])
- except Exception as e:
-            print 'ERROR: %s' %(str(e))
-            return
- obj.set_service_template(template)
- try:
- self.vnc.service_instance_create(obj)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
-
- def delete(self, name):
- obj = self.obj_get(name)
- if not obj:
-            print 'ERROR: Object %s is not found!' %(name)
-            return
- try:
- self.vnc.service_instance_delete(id = obj.uuid)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
-
-
-class ConfigImage():
- def __init__(self, client):
- self.nova = client.nova
-
- def obj_list(self):
- list = self.nova.images.list()
- return list
-
- def obj_get(self, name):
- for item in self.obj_list():
- if (item.name == name):
- return item
-
- def obj_show(self, obj):
- print 'Image'
- print 'Name: %s' %(obj.name)
- print 'UUID: %s' %(obj.id)
-
- def show(self, name = None):
- if name:
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- self.obj_show(obj)
- else:
- for item in self.obj_list():
- print ' %s' %(item.name)
-
- def add(self, name):
- pass
- def delete(self, name):
- pass
-
-
-class ConfigFlavor():
- def __init__(self, client):
- self.nova = client.nova
-
- def obj_list(self):
- list = self.nova.flavors.list()
- return list
-
- def obj_get(self, name):
- for item in self.obj_list():
- if (item.name == name):
- return item
-
- def obj_show(self, obj):
- print 'Flavor'
- print 'Name: %s' %(obj.name)
- print 'UUID: %s' %(obj.id)
-
- def show(self, name = None):
- if name:
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- self.obj_show(obj)
- else:
- for item in self.obj_list():
- print ' %s' %(item.name)
-
- def add(self, name):
- pass
- def delete(self, name):
- pass
-
-
-class ConfigVirtualMachine():
- def __init__(self, client):
- self.vnc = client.vnc
- self.nova = client.nova
- self.tenant = client.tenant
-
- def obj_list(self):
- list = self.nova.servers.list()
- return list
-
- def obj_get(self, name):
- for item in self.obj_list():
- if (item.name == name):
- return item
-
- def obj_show(self, obj):
- print 'Virtual Machine'
- print 'Name: %s' %(obj.name)
- print 'UUID: %s' %(obj.id)
- print 'Status: %s' %(obj.status)
- print 'Addresses:'
- for item in obj.addresses.keys():
- print ' %s %s' %(obj.addresses[item][0]['addr'], item)
-
- def show(self, name):
- if name:
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- self.obj_show(obj)
- else:
- for item in self.obj_list():
- print ' %s' %(item.name)
-
- def add(self, name, image, flavor, network, node = None, user_data = None,
- wait = None):
- try:
- image_obj = self.nova.images.find(name = image)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- return
- try:
- flavor_obj = self.nova.flavors.find(name = flavor)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- return
-
- networks = []
- net_list = self.vnc.virtual_networks_list()['virtual-networks']
- for item in network:
- for vn in net_list:
- if (vn['fq_name'][1] == self.tenant.name) and \
- (vn['fq_name'][2] == item):
- networks.append({'net-id': vn['uuid']})
- break
- else:
- print 'ERROR: Network %s is not found!' %(item)
- return
-
- #if node:
- # zone = self.nova.availability_zones.list()[1]
- # for item in zone.hosts.keys():
- # if (item == node):
- # break
- # else:
- # print 'ERROR: Node %s is not found!' %(name)
- # return
-
- try:
- vm = self.nova.servers.create(name = name, image = image_obj,
- flavor = flavor_obj, availability_zone = node,
- nics = networks, userdata = user_data)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- return
-
- if wait:
- timeout = 12
- while timeout:
- time.sleep(3)
- vm = self.nova.servers.get(vm.id)
- if vm.status != 'BUILD':
- print 'VM %s is %s' %(vm.name, vm.status)
- break
- timeout -= 1
-
- def delete(self, name):
- obj = self.obj_get(name)
- if not obj:
-            print 'ERROR: Object %s is not found!' %(name)
-            return
- self.nova.servers.delete(obj.id)
-
-
-class ConfigRouteTable():
- def __init__(self, client):
- self.vnc = client.vnc
- self.tenant = client.tenant
-
- def obj_list(self):
- list = self.vnc.route_tables_list()['route-tables']
- return list
-
- def obj_get(self, name):
- for item in self.obj_list():
- if (item['fq_name'][1] == self.tenant.name) and \
- (item['fq_name'][2] == name):
- return self.vnc.route_table_read(id = item['uuid'])
-
- def obj_show(self, obj):
- print 'Route Table'
- print 'Name: %s' %(obj.get_fq_name())
- print 'UUID: %s' %(obj.uuid)
- routes = obj.get_routes()
- if not routes:
- return
- for item in routes.get_route():
- print ' %s next-hop %s' %(item.get_prefix(), item.get_next_hop())
-
- def show(self, name = None):
- if name:
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- self.obj_show(obj)
- else:
- for item in self.obj_list():
- if (item['fq_name'][1] == self.tenant.name):
- print ' %s' %(item['fq_name'][2])
-
- def route_add(self, obj, route):
- routes = obj.get_routes()
- if not routes:
- routes = vnc_api.RouteTableType()
- obj.set_routes(routes)
- prefix = route.split(':')[0]
- nh = 'default-domain:%s:%s' %(self.tenant.name, route.split(':')[1])
- routes.add_route(vnc_api.RouteType(prefix = prefix, next_hop = nh))
-
- def route_del(self, obj, prefix):
- routes = obj.get_routes()
- if not routes:
- return
- for item in routes.get_route():
- if (item.get_prefix() == prefix):
- routes.delete_route(item)
-
- def add(self, name, route = None):
- create = False
- obj = self.obj_get(name)
- if not obj:
- obj = vnc_api.RouteTable(name = name, parent_obj = self.tenant)
- create = True
- if route:
- for item in route:
- self.route_add(obj, item)
- if create:
- try:
- self.vnc.route_table_create(obj)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- else:
- self.vnc.route_table_update(obj)
-
- def delete(self, name, route = None):
- obj = self.obj_get(name)
- if not obj:
-            print 'ERROR: Object %s is not found!' %(name)
-            return
- if route:
- for item in route:
- self.route_del(obj, item)
- self.vnc.route_table_update(obj)
- else:
- try:
- self.vnc.route_table_delete(id = obj.uuid)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
-
-
-class ConfigInterfaceRouteTable():
- def __init__(self, client):
- self.vnc = client.vnc
- self.tenant = client.tenant
-
- def obj_list(self):
- list = self.vnc.interface_route_tables_list()['interface-route-tables']
- return list
-
- def obj_get(self, name):
- for item in self.obj_list():
- if (item['fq_name'][1] == self.tenant.name) and \
- (item['fq_name'][2] == name):
- return self.vnc.interface_route_table_read(id = item['uuid'])
-
- def obj_show(self, obj):
- print 'Interface Route Table'
- print 'Name: %s' %(obj.get_fq_name())
- print 'UUID: %s' %(obj.uuid)
- routes = obj.get_interface_route_table_routes()
- if not routes:
- return
- for item in routes.get_route():
- print ' %s' %(item.get_prefix())
-
- def show(self, name = None):
- if name:
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- self.obj_show(obj)
- else:
- for item in self.obj_list():
- if (item['fq_name'][1] == self.tenant.name):
- print ' %s' %(item['fq_name'][2])
-
- def route_add(self, obj, prefix):
- routes = obj.get_interface_route_table_routes()
- if not routes:
- routes = vnc_api.RouteTableType()
- routes.add_route(vnc_api.RouteType(prefix = prefix))
- obj.set_interface_route_table_routes(routes)
-
- def route_del(self, obj, prefix):
- routes = obj.get_interface_route_table_routes()
- if not routes:
- return
- for item in routes.get_route():
- if (item.get_prefix() == prefix):
- routes.delete_route(item)
- obj.set_interface_route_table_routes(routes)
-
- def add(self, name, route = None):
- create = False
- obj = self.obj_get(name)
- if not obj:
- obj = vnc_api.InterfaceRouteTable(name = name,
- parent_obj = self.tenant)
- create = True
- if route:
- for item in route:
- self.route_add(obj, item)
- if create:
- try:
- self.vnc.interface_route_table_create(obj)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- else:
- self.vnc.interface_route_table_update(obj)
-
- def delete(self, name, route = None):
- obj = self.obj_get(name)
- if not obj:
-            print 'ERROR: Object %s is not found!' %(name)
-            return
- if route:
- for item in route:
- self.route_del(obj, item)
- self.vnc.interface_route_table_update(obj)
- else:
- try:
- self.vnc.interface_route_table_delete(id = obj.uuid)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
-
-
-class ConfigVmInterface():
- def __init__(self, client):
- self.vnc = client.vnc
- self.tenant = client.tenant
- self.nova = client.nova
-
- def obj_list(self, vm_id = None):
- list = []
- if vm_id:
- vm = self.vnc.virtual_machine_read(id = vm_id)
- if_ref_list = vm.get_virtual_machine_interface_back_refs()
- for if_ref in if_ref_list:
- if_obj = self.vnc.virtual_machine_interface_read(
- id = if_ref['uuid'])
- vn_name = if_obj.get_virtual_network_refs()[0]['to'][2]
- list.append({'name':vn_name, 'uuid':if_ref['uuid'],
- 'obj':if_obj})
- else:
- for vm_nova in self.nova.servers.list():
- try:
- vm = self.vnc.virtual_machine_read(id = vm_nova.id)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- continue
- if_ref_list = vm.get_virtual_machine_interface_back_refs()
- for if_ref in if_ref_list:
- if_obj = self.vnc.virtual_machine_interface_read(
- id = if_ref['uuid'])
- vn_name = if_obj.get_virtual_network_refs()[0]['to'][2]
- list.append({'name':'%s:%s' %(vm_nova.name, vn_name),
- 'uuid':if_ref['uuid'], 'obj':if_obj})
- return list
-
- def obj_get(self, name, vm_id = None):
- list = self.obj_list(vm_id)
- for item in list:
- if (item['name'] == name):
- return item['obj']
-
- def prop_mac_show(self, obj):
- print '[P] MAC addresses:'
- mac = obj.get_virtual_machine_interface_mac_addresses()
- if not mac:
- return
- for item in mac.get_mac_address():
- print ' %s' %(item)
-
- def prop_prop_show(self, obj):
- prop = obj.get_virtual_machine_interface_properties()
- if not prop:
- return
- print '[P] Service interface type: %s' \
- %(prop.get_service_interface_type())
- print '[P] Interface mirror: %s' %(prop.get_interface_mirror())
-
- def ref_sg_show(self, obj):
- print '[R] Security groups:'
- refs = obj.get_security_group_refs()
- if refs:
- for item in obj.get_security_group_refs():
- print ' %s' %(item['to'][2])
-
- def ref_net_show(self, obj):
- print '[R] Virtual networks:'
- for item in obj.get_virtual_network_refs():
- print ' %s' %(item['to'][2])
-
- def ref_irt_show(self, obj):
- print '[R] Interface route tables:'
- list = obj.get_interface_route_table_refs()
- if list:
- for item in list:
- print ' %s' %(item['to'][2])
-
- def back_ref_ip_show(self, obj):
- print '[BR] Instance IPs:'
- list = obj.get_instance_ip_back_refs()
- if not list:
- return
- for item in list:
- ip = self.vnc.instance_ip_read(id = item['uuid'])
- print ' %s' %(ip.get_instance_ip_address())
-
- def back_ref_fip_show(self, obj):
- print '[BR] Floating IPs:'
- list = obj.get_floating_ip_back_refs()
- if not list:
- return
- for item in list:
- ip = self.vnc.floating_ip_read(id = item['uuid'])
- print ' %s' %(ip.get_floating_ip_address())
-
- def obj_show(self, obj, name):
- print 'Virtual Machine Interface'
- print 'Name: %s' %(name)
- print 'UUID: %s' %(obj.uuid)
- self.prop_mac_show(obj)
- self.prop_prop_show(obj)
- self.ref_sg_show(obj)
- self.ref_net_show(obj)
- self.ref_irt_show(obj)
- self.back_ref_ip_show(obj)
- self.back_ref_fip_show(obj)
-
- def show(self, name = None):
- if name:
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- self.obj_show(obj, name)
- else:
- for item in self.obj_list():
- print ' %s' %(item['name'])
-
- def sg_add(self, obj, sg):
- try:
- sg_obj = self.vnc.security_group_read(
- fq_name = ['default-domain', self.tenant.name, sg])
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- return
- obj.add_security_group(sg_obj)
-
- def addr_add(self, obj, addr):
- id = str(uuid.uuid4())
- ip_obj = vnc_api.InstanceIp(name = id, instance_ip_address = addr)
- ip_obj.uuid = id
- ip_obj.add_virtual_machine_interface(obj)
- vn_id = obj.get_virtual_network_refs()[0]['uuid']
- vn_obj = self.vnc.virtual_network_read(id = vn_id)
- ip_obj.add_virtual_network(vn_obj)
- self.vnc.instance_ip_create(ip_obj)
-
- def irt_add(self, obj, irt):
- try:
- table_obj = self.vnc.interface_route_table_read(
- fq_name = ['default-domain', self.tenant.name, irt])
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- return
- obj.add_interface_route_table(table_obj)
-
- def fip_add(self, obj, fip_pool, fip):
- pool_name = fip_pool.split(':')
- pool_name.insert(0, 'default-domain')
- try:
- pool_obj = self.vnc.floating_ip_pool_read(fq_name = pool_name)
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- return
- id = str(uuid.uuid4())
- fip_obj = vnc_api.FloatingIp(name = id, parent_obj = pool_obj)
- fip_obj.uuid = id
- if (fip != 'any'):
- fip_obj.set_floating_ip_address(fip)
- fip_obj.add_project(self.tenant)
- fip_obj.add_virtual_machine_interface(obj)
- self.vnc.floating_ip_create(fip_obj)
- self.tenant.add_floating_ip_pool(pool_obj)
- self.vnc.project_update(self.tenant)
-
- def add(self, name, sg = None, irt = None, addr = None,
- fip_pool = None, fip = None):
- update = False
- obj = self.obj_get(name)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- if sg:
- self.sg_add(obj, sg)
- update = True
- if irt:
- self.irt_add(obj, irt)
- update = True
- if addr:
- self.addr_add(obj, addr)
- update = True
- if fip and fip_pool:
- self.fip_add(obj, fip_pool, fip)
- update = True
- if update:
- self.vnc.virtual_machine_interface_update(obj)
-
- def sg_del(self, obj, sg):
- try:
- sg_obj = self.vnc.security_group_read(
- fq_name = ['default-domain', self.tenant.name, sg])
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- return
- obj.del_security_group(sg_obj)
-
- def irt_del(self, obj, irt):
- try:
- table_obj = self.vnc.interface_route_table_read(
- fq_name = ['default-domain', self.tenant.name, irt])
- except Exception as e:
- print 'ERROR: %s' %(str(e))
- return
- obj.del_interface_route_table(table_obj)
-
- def addr_del(self, obj, addr):
- ip_list = obj.get_instance_ip_back_refs()
- for ip in ip_list:
- ip_obj = self.vnc.instance_ip_read(id = ip['uuid'])
- if (ip_obj.get_instance_ip_address() == addr):
- self.vnc.instance_ip_delete(id = ip_obj.uuid)
- break
- else:
- print 'ERROR: IP address %s is not found!' %(addr)
-
- def fip_del(self, obj):
- list = obj.get_floating_ip_back_refs()
- if not list:
- return
- for item in list:
- ip = self.vnc.floating_ip_delete(id = item['uuid'])
-
- def delete(self, name, sg = None, irt = None, addr = None,
- fip = None, vm_id = None):
- update = False
- obj = self.obj_get(name, vm_id)
- if not obj:
- print 'ERROR: Object %s is not found!' %(name)
- return
- if sg:
- self.sg_del(obj, sg)
- update = True
- if irt:
- self.irt_del(obj, irt)
- update = True
- if addr:
- self.addr_del(obj, addr)
- update = True
- if fip:
- self.fip_del(obj)
- update = True
- if update:
- self.vnc.virtual_machine_interface_update(obj)
-
-
-class ConfigGlobalVrouter():
- def __init__(self, client):
- self.vnc = client.vnc
- self.tenant = client.tenant
-
- def obj_list(self):
- list = self.vnc.interface_route_tables_list()['interface-route-tables']
- return list
-
- def obj_get(self, name):
- obj = self.vnc.global_vrouter_config_read(
- fq_name = ['default-global-system-config',
- 'default-global-vrouter-config'])
- return obj
-
- def obj_show(self, obj):
- pass
-
- def show(self, name = None):
- obj = self.obj_get('dummy')
- print 'Link Local Service'
- for item in obj.get_linklocal_services().get_linklocal_service_entry():
- print ' %s %s:%s %s:%s' %(item.get_linklocal_service_name(),
- item.get_linklocal_service_ip(),
- item.get_linklocal_service_port(),
- item.get_ip_fabric_service_ip()[0],
- item.get_ip_fabric_service_port())
-
- def add(self, name, link_local_addr, fabric_addr):
- obj = self.obj_get('dummy')
- list = obj.get_linklocal_services().get_linklocal_service_entry()
- list.append(vnc_api.LinklocalServiceEntryType(
- linklocal_service_name = name,
- linklocal_service_ip = link_local_addr.split(':')[0],
- linklocal_service_port = int(link_local_addr.split(':')[1]),
- ip_fabric_service_ip = [fabric_addr.split(':')[0]],
- ip_fabric_service_port = int(fabric_addr.split(':')[1])))
- self.vnc.global_vrouter_config_update(obj)
-
- def delete(self, name):
- obj = self.obj_get('dummy')
- list = obj.get_linklocal_services().get_linklocal_service_entry()
- for item in list:
- if (item.get_linklocal_service_name() == name):
- list.remove(item)
- break
- self.vnc.global_vrouter_config_update(obj)
-
-class ConfigClient():
- def __init__(self, username, password, tenant, region, api_server):
- self.vnc = vnc_api.VncApi(username = username, password = password,
- tenant_name = tenant, api_server_host = api_server)
- if config_nova:
- self.nova = novaclient.v1_1.client.Client(username = username,
- api_key = password, project_id = tenant,
- region_name = region,
- auth_url = 'http://%s:35357/v2.0' %(api_server))
- else:
- self.nova = None
- self.tenant = self.vnc.project_read(
- fq_name = ['default-domain', tenant])
-
diff --git a/Testcases/config_obj.pyc b/Testcases/config_obj.pyc
deleted file mode 100644
index 3b40b9b..0000000
--- a/Testcases/config_obj.pyc
+++ /dev/null
Binary files differ
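Most of config_obj.py above is boilerplate around a handful of vnc_api calls. For reference, the default rule that ConfigPolicy.rule_add builds (bidirectional, any network, any port, action 'pass') reduces to composing the vnc_api types already used in the removed file. A hedged sketch, assuming the Contrail vnc_api Python bindings are importable; only constructors and setters that appear in the removed code are used:

from vnc_api import vnc_api

def default_pass_rule():
    # Reproduce rule_add's defaults: '<>' direction, any protocol,
    # any source/destination network, any port, simple action 'pass'.
    any_port = vnc_api.PortType(start_port=-1, end_port=-1)
    rule = vnc_api.PolicyRuleType()
    rule.set_direction('<>')
    rule.set_protocol('any')
    rule.set_src_addresses([vnc_api.AddressType(virtual_network='any')])
    rule.set_dst_addresses([vnc_api.AddressType(virtual_network='any')])
    rule.set_src_ports([any_port])
    rule.set_dst_ports([any_port])
    rule.set_action_list(vnc_api.ActionListType(simple_action='pass'))
    return rule

entries = vnc_api.PolicyEntriesType(policy_rule=[default_pass_rule()])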
diff --git a/Testcases/config_shell.py b/Testcases/config_shell.py
deleted file mode 100644
index 2fe5ad1..0000000
--- a/Testcases/config_shell.py
+++ /dev/null
@@ -1,379 +0,0 @@
-
-from config_obj import *
-import argparse
-
-class ConfigShell():
-
- def __init__(self):
- self.parser_init()
-
- def env(self, *args, **kwargs):
- for arg in args:
- value = os.environ.get(arg, None)
- if value:
- return value
- return kwargs.get('default', '')
-
- def do_help(self, args):
- if args.obj_parser:
- args.obj_parser.print_help()
- else:
- self.parser.print_help()
-
- def parser_init(self):
- parser = argparse.ArgumentParser()
- parser.add_argument('--username', help = 'User name')
- parser.add_argument('--password', help = 'Password')
- parser.add_argument('--tenant', help = 'Tenant name')
- parser.add_argument('--region', help = 'Region name')
- parser.add_argument('--api-server', help = 'API server address')
-
- parser.add_argument('cmd', choices = ['add', 'show', 'delete', 'help'],
- metavar = '<command>', help = '[ add | show | delete | help ]')
-
- subparsers = parser.add_subparsers(metavar = '<object>')
- self.sub_cmd_dict = {}
-
- sub_parser = subparsers.add_parser('vdns', help = 'Virtual DNS')
- sub_parser.set_defaults(obj_class = ConfigVirtualDns,
- obj_parser = sub_parser)
- sub_parser.add_argument('name', nargs = '?', default = None,
- metavar = '<name>', help = 'The name of virtual DNS')
- sub_parser.add_argument('--domain-name', metavar = '<name>',
- help = 'The name of DNS domain')
- sub_parser.add_argument('--record-order',
- choices = ['fixed', 'random', 'round-robin'],
- default = 'random', metavar = '<order>',
- help = 'The order of DNS records ' \
- '[ random | fixed | round-robin ]')
- sub_parser.add_argument('--next-dns', metavar = '<name>',
- help = 'The name of next virtual DNS service or ' \
- 'the IP address of DNS server reachable by fabric.')
-
- sub_parser = subparsers.add_parser('ipam', help = 'Network IPAM')
- sub_parser.set_defaults(obj_class = ConfigIpam,
- obj_parser = sub_parser)
- sub_parser.add_argument('name', nargs = '?', default = None,
- metavar = '<name>', help = 'The name of IPAM')
- sub_parser.add_argument('--dns-type',
- choices = ['none', 'default', 'tenant', 'virtual'],
- metavar = '<type>',
- help = 'The type of DNS service ' \
- '[ none | default | virtual | tenant ]')
- sub_parser.add_argument('--virtual-dns', metavar = '<name>',
- help = 'The name of virtual DNS service')
- sub_parser.add_argument('--tenant-dns', metavar = '<address>',
- action = 'append',
- help = 'The address of tenant DNS')
- sub_parser.add_argument('--domain-name', metavar = '<name>',
- help = 'The name of DNS domain')
- sub_parser.add_argument('--ntp-server', metavar = '<address>',
- help = 'The address of NTP server')
-
- sub_parser = subparsers.add_parser('policy', help = 'Network Policy')
- sub_parser.set_defaults(obj_class = ConfigPolicy,
- obj_parser = sub_parser)
- sub_parser.add_argument('name', nargs = '?', default = None,
- metavar = '<name>', help = 'The name of policy')
- sub_parser.add_argument('--rule', action = 'append',
- metavar = '<arguments>',
- help = 'Policy rule ' \
- 'direction=[ "<>" | ">" ],' \
- 'protocol=[ any | tcp | udp | icmp ],' \
- 'src-net=[ <name> | any ],' \
- 'dst-net=[ <name> | any ],' \
- 'src-port=[ <start>:<end> | any ],' \
- 'dst-port=[ <start>:<end> | any ],' \
- 'action=[ pass | deny | drop | reject | alert | ' \
- 'log | service ],' \
- 'service=<name>,' \
- 'index=<index>')
-
- sub_parser = subparsers.add_parser('security-group',
- help = 'Security Group')
- sub_parser.set_defaults(obj_class = ConfigSecurityGroup,
- obj_parser = sub_parser)
- sub_parser.add_argument('name', nargs = '?', default = None,
- metavar = '<name>', help = 'The name of security group')
- sub_parser.add_argument('--rule', metavar = '<index>',
- help = 'Rule index')
- sub_parser.add_argument('--direction',
- choices = ['ingress', 'egress'],
- metavar = '<direction>',
- help = 'Direction [ ingress | egress ]')
- sub_parser.add_argument('--protocol',
- choices = ['any', 'tcp', 'udp', 'icmp'],
- metavar = '<protocol>',
- help = 'Protocol [ any | tcp | udp | icmp ]')
- sub_parser.add_argument('--address', action = 'append',
- metavar = '<prefix>/<length>', help = 'Remote IP address')
- sub_parser.add_argument('--port', action = 'append', type = str,
- metavar = '<start>:<end>', help = 'The range of remote port')
-
- sub_parser = subparsers.add_parser('network',
- help = 'Virtual Network')
- sub_parser.set_defaults(obj_class = ConfigNetwork,
- obj_parser = sub_parser)
- sub_parser.add_argument('name', nargs = '?', default = None,
- metavar = '<name>', help = 'The name of virtual network')
- sub_parser.add_argument('--ipam', metavar = '<name>',
- help = 'The name of IPAM')
- sub_parser.add_argument('--subnet', metavar = '<prefix>/<length>',
- help = 'Subnet prefix and length')
- sub_parser.add_argument('--gateway', metavar = '<address>',
- help = 'The gateway address of subnet')
- sub_parser.add_argument('--policy', metavar = '<name>',
- help = 'The name of network policy')
- sub_parser.add_argument('--route-target', metavar = '<AS>:<RT>',
- help = 'Route target')
- sub_parser.add_argument('--route-table', metavar = '<name>',
- help = 'The name of route table')
- sub_parser.add_argument('--l2', action = 'store_true',
- help = 'Layer 2 network, layer 2&3 by default')
- sub_parser.add_argument('--shared', action = 'store_true',
- help = 'Enable sharing with other tenants')
- sub_parser.add_argument('--external', action = 'store_true',
- help = 'Enable external access')
-
- sub_parser = subparsers.add_parser('floating-ip-pool',
- help = 'Floating IP Pool')
- sub_parser.set_defaults(obj_class = ConfigFloatingIpPool,
- obj_parser = sub_parser)
- sub_parser.add_argument('name', nargs = '?', default = None,
- metavar = '<name>', help = 'The name of floating IP pool')
- sub_parser.add_argument('--network', metavar = '<name>',
- help = 'The name of virtual network holding floating IP pool')
- #sub_parser.add_argument('--floating-ip', action = 'store_true',
- # help = 'Floating IP')
-
- sub_parser = subparsers.add_parser('vm',
- help = 'Virtual Machine')
- sub_parser.set_defaults(obj_class = ConfigVirtualMachine,
- obj_parser = sub_parser)
- sub_parser.add_argument('name', nargs = '?', default = None,
- metavar = '<name>', help = 'The name of virtual machine')
- sub_parser.add_argument('--image', metavar = '<name>',
- help = 'The name of image')
- sub_parser.add_argument('--flavor', metavar = '<name>',
- help = 'The name of flavor')
- sub_parser.add_argument('--network', action = 'append',
- metavar = '<name>',
- help = 'The name of network')
- sub_parser.add_argument('--user-data', metavar = '<name>',
- help = 'Full file name containing user data')
- sub_parser.add_argument('--node', metavar = '<name>',
- help = 'The name of compute node')
- sub_parser.add_argument('--wait', action = 'store_true',
- help = 'Wait till VM is active')
-
- sub_parser = subparsers.add_parser('interface-route-table',
- help = 'Interface Route Table')
- sub_parser.set_defaults(obj_class = ConfigInterfaceRouteTable,
- obj_parser = sub_parser)
- sub_parser.add_argument('name', nargs = '?', default = None,
- metavar = '<name>', help = 'The name of interface route table')
- sub_parser.add_argument('--route', action = 'append',
- metavar = '<prefix>/<length>', help = 'Route')
-
- sub_parser = subparsers.add_parser('route-table',
- help = 'Network Route Table')
- sub_parser.set_defaults(obj_class = ConfigRouteTable,
- obj_parser = sub_parser)
- sub_parser.add_argument('name', nargs = '?', default = None,
- metavar = '<name>', help = 'The name of route table')
- sub_parser.add_argument('--route', action = 'append',
- metavar = '<prefix>/<length>:<next-hop>',
- help = 'The route and next-hop')
-
- sub_parser = subparsers.add_parser('vm-interface',
- help = 'Virtual Machine Interface')
- sub_parser.set_defaults(obj_class = ConfigVmInterface,
- obj_parser = sub_parser)
- sub_parser.add_argument('name', nargs = '?', default = None,
- metavar = '<VM>:<network>',
- help = 'The name of virtual machine interface')
- sub_parser.add_argument('--interface-route-table', metavar = '<name>',
- help = 'The name of interface route table')
- sub_parser.add_argument('--security-group', metavar = '<name>',
- help = 'The name of security group')
- sub_parser.add_argument('--address',
- metavar = '<address>',
- help = 'IP address')
- sub_parser.add_argument('--floating-ip',
- metavar = '<address>',
- help = 'Floating IP address [ any | <address> ]')
- sub_parser.add_argument('--floating-ip-pool',
- metavar = '<pool>',
- help = 'The floating IP pool to allocate a floating IP from ' \
- '<tenant>:<network>:<floating IP pool>')
-
- sub_parser = subparsers.add_parser('image',
- help = 'Virtual Machine Image')
- self.sub_cmd_dict['image'] = sub_parser
- sub_parser.set_defaults(obj_class = ConfigImage)
- sub_parser.add_argument('name', nargs = '?', default = None)
-
- sub_parser = subparsers.add_parser('flavor',
- help = 'Virtual Machine Flavor')
- self.sub_cmd_dict['flavor'] = sub_parser
- sub_parser.set_defaults(obj_class = ConfigFlavor)
- sub_parser.add_argument('name', nargs = '?', default = None)
-
- sub_parser = subparsers.add_parser('service-template',
- help = 'Service Template')
- sub_parser.set_defaults(obj_class = ConfigServiceTemplate,
- obj_parser = sub_parser)
- sub_parser.add_argument('name', nargs = '?', default = None,
- metavar = '<name>', help = 'The name of service template')
- sub_parser.add_argument('--mode',
- choices = ['transparent', 'in-network', 'in-network-nat'],
- metavar = '<mode>',
- help = 'Service mode ' \
- '[ transparent | in-network | in-network-nat ]')
- sub_parser.add_argument('--type',
- choices = ['firewall', 'analyzer'],
- metavar = '<type>',
- help = 'Service type [ firewall | analyzer ]')
- sub_parser.add_argument('--image', metavar = '<name>',
- help = 'The name of image')
- sub_parser.add_argument('--flavor', metavar = '<name>',
- help = 'The name of flavor')
- sub_parser.add_argument('--scale', action = 'store_true',
- help = 'Enable service scaling')
- sub_parser.add_argument('--interface',
- choices = ['management', 'left', 'right', 'other'],
- metavar = '<type>',
- action = 'append',
- help = 'Service interface ' \
- '[ management | left | right | other ]')
-
- sub_parser = subparsers.add_parser('service-instance',
- help = 'Service Instance')
- sub_parser.set_defaults(obj_class = ConfigServiceInstance,
- obj_parser = sub_parser)
- sub_parser.add_argument('name', nargs = '?', default = None,
- metavar = '<name>', help = 'The name of service instance')
- sub_parser.add_argument('--template',
- metavar = '<template>',
- help = 'Service template')
- sub_parser.add_argument('--network', action = 'append',
- metavar = '<arguments>',
- help = 'network=[ <name> | auto ],tenant=<name>,' \
- 'route=<prefix>/<length> ' \
- 'The network order must be the same as interface ' \
- 'order defined in service template.')
- sub_parser.add_argument('--scale-max',
- metavar = '<number>',
- help = 'The maximum number of instances')
- sub_parser.add_argument('--auto-policy', action = 'store_true',
- help = 'Enable automatic policy')
-
- sub_parser = subparsers.add_parser('link-local',
- help = 'Link Local Service')
- sub_parser.set_defaults(obj_class = ConfigGlobalVrouter,
- obj_parser = sub_parser)
- sub_parser.add_argument('name', nargs = '?', default = None,
- metavar = '<name>', help = 'The name of link local service')
- sub_parser.add_argument('--link-local-address',
- metavar = '<address>',
- help = 'Link Local service address and port ' \
- '<link local address>:<link local port>')
- sub_parser.add_argument('--fabric-address',
- metavar = '<address>',
- help = 'Fabric address and port ' \
- '<fabric address>:<fabric port>')
- self.parser = parser
-
- def parse(self, argv = None):
- args = self.parser.parse_args(args = argv)
- return args
-
- def run(self, args, client):
- obj = args.obj_class(client = client)
- if args.cmd == 'help':
- self.do_help(args)
- elif args.cmd == 'show':
- obj.show(args.name)
- elif args.cmd == 'add':
- if (args.obj_class == ConfigVirtualDns):
- obj.add(args.name, args.record_order, args.next_dns)
- elif (args.obj_class == ConfigIpam):
- obj.add(args.name, args.dns_type, args.virtual_dns,
- args.tenant_dns, args.domain_name, args.ntp_server)
- elif (args.obj_class == ConfigPolicy):
- obj.add(args.name, args.rule)
- elif (args.obj_class == ConfigSecurityGroup):
- obj.add(args.name, args.protocol, args.address, args.port,
- args.direction)
- elif (args.obj_class == ConfigNetwork):
- obj.add(args.name, args.ipam, args.subnet, args.policy,
- args.route_target, args.route_table, args.shared,
- args.external, args.l2)
- elif (args.obj_class == ConfigFloatingIpPool):
- obj.add(args.name, args.network)
- elif (args.obj_class == ConfigServiceTemplate):
- obj.add(args.name, args.mode, args.type, args.image,
- args.flavor, args.interface)
- elif (args.obj_class == ConfigServiceInstance):
- obj.add(args.name, args.template, args.network,
- args.auto_policy, args.scale_max)
- elif (args.obj_class == ConfigVirtualMachine):
- obj.add(args.name, args.image, args.flavor, args.network,
- args.node, args.user_data, args.wait)
- elif (args.obj_class == ConfigRouteTable):
- obj.add(args.name, args.route)
- elif (args.obj_class == ConfigInterfaceRouteTable):
- obj.add(args.name, args.route)
- elif (args.obj_class == ConfigVmInterface):
- obj.add(args.name, args.security_group,
- args.interface_route_table, args.address,
- args.floating_ip_pool, args.floating_ip)
- elif (args.obj_class == ConfigGlobalVrouter):
- obj.add(args.name, args.link_local_address,
- args.fabric_address)
- elif args.cmd == 'delete':
- if (args.obj_class == ConfigVirtualDns):
- obj.delete(args.name)
- elif (args.obj_class == ConfigIpam):
- obj.delete(args.name, args.domain_name)
- elif (args.obj_class == ConfigPolicy):
- obj.delete(args.name, args.rule)
- elif (args.obj_class == ConfigSecurityGroup):
- obj.delete(args.name, args.rule)
- elif (args.obj_class == ConfigNetwork):
- obj.delete(args.name, args.ipam, args.policy,
- args.route_target)
- elif (args.obj_class == ConfigFloatingIpPool):
- obj.delete(args.name, args.network)
- elif (args.obj_class == ConfigServiceTemplate):
- obj.delete(args.name)
- elif (args.obj_class == ConfigServiceInstance):
- obj.delete(args.name)
- elif (args.obj_class == ConfigVirtualMachine):
- obj.delete(args.name)
- elif (args.obj_class == ConfigRouteTable):
- obj.delete(args.name, args.route)
- elif (args.obj_class == ConfigInterfaceRouteTable):
- obj.delete(args.name, args.route)
- elif (args.obj_class == ConfigVmInterface):
- obj.delete(args.name, args.security_group,
- args.interface_route_table, args.address,
- args.floating_ip)
- elif (args.obj_class == ConfigGlobalVrouter):
- obj.delete(args.name)
- else:
- print 'Unknown action %s' %(args.cmd)
- return
-
- def main(self):
- args = self.parse()
- #print args
- #return
- client = ConfigClient(args.username, args.password, args.tenant,
- args.region, args.api_server)
- self.run(args, client)
-
-
-if __name__ == '__main__':
- ConfigShell().main()
-
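For readers skimming the deleted shell, here is a minimal sketch of how `parse()` and `run()` above fit together when driven programmatically, mirroring what `main()` does. All values are placeholders, and the sketch assumes the access options and the `<command>` positional are defined on the top-level parser earlier in this file, and that `ConfigShell` and `ConfigClient` are importable from this module, as `main()` implies.

```python
# Minimal sketch (hypothetical values, Python 2 like the original code).
from config_shell import ConfigShell, ConfigClient

shell = ConfigShell()
args = shell.parse(['--username', 'admin', '--password', 'secret',
                    '--tenant', 'demo', '--region', 'RegionOne',
                    '--api-server', '10.10.11.16',
                    'show', 'network'])
# Build the API client exactly the way main() does, then dispatch.
client = ConfigClient(args.username, args.password, args.tenant,
                      args.region, args.api_server)
shell.run(args, client)
```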
diff --git a/Testcases/config_shell.pyc b/Testcases/config_shell.pyc
deleted file mode 100644
index 0c02e5d..0000000
--- a/Testcases/config_shell.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/configuration.md b/Testcases/configuration.md
deleted file mode 100644
index 7ca3af2..0000000
--- a/Testcases/configuration.md
+++ /dev/null
@@ -1,666 +0,0 @@
-# Command line utility to configure Contrail
-
-## Files
-
-    config
-    config_shell.py
-    config_obj.py
-
-## Syntax
-
-    config [access options] <command> <object> [name] [options]
-
-    access options:
-        Options to access API server of OpenStack and OpenContrail.
-        --username <user name>
-        --password <user password>
-        --region <region name>
-        --tenant <tenant name>
-        --api-server <API server IP address>
-
-    <command> <object> [name] [options]:
-
-    add vdns <name>
-        --domain-name <name>
-        --record-order [ random | fixed | round-robin ]
-        --next-dns <name>
-
-    show vdns [name]
-
-    delete vdns <name>
-
-    add ipam <name>
-        --dns-type [ none | default | virtual | tenant ]
-        --virtual-dns <virtual DNS>
-        --tenant-dns <tenant DNS>
-        --domain-name <domain name>
-        --ntp-server <NTP server>
-
-    show ipam [name]
-
-    delete ipam <name>
-
-    add policy <name>
-        --direction [ <> | > ]
-        --protocol [ any | tcp | udp | icmp ]
-        --src-net <source network>
-        --dst-net <destination network>
-        --src-port <start:end>
-        --dst-port <start:end>
-        --action [ pass | deny | drop | reject | alert | log | service ]
-        --service <service>
-
-    show policy <name>
-
-    delete policy <name>
-        --rule <rule index>
-
-    add security-group <name>
-        --rule <rule index>
-        --direction [ ingress | egress ]
-        --protocol [ any | tcp | udp | icmp ]
-        --address <prefix/length>
-        --port <start:end>
-
-    show security-group [name]
-
-    delete security-group <name>
-
-    add network <name>
-        --ipam <IPAM>
-        --subnet <prefix/length>
-        --gateway <gateway>
-        --policy <policy>
-        --route-target <route target>
-        --route-table <route table>
-        --l2
-
-    show network [name]
-
-    delete network <name>
-        --policy <policy>
-        --route-target <route target>
-        --route-table <route table>
-
-    add floating-ip-pool <network>:<pool>
-
-    show floating-ip-pool [<network>:<pool>]
-
-    delete floating-ip-pool <network>:<pool>
-
-    add vm <name>
-        --image <image>
-        --flavor <flavor>
-        --network <network>
-        --node <node name>
-        --user-data <file name>
-        --wait
-
-    show vm [name]
-
-    delete vm <name>
-
-    add interface-route-table <name>
-        --route <prefix/length>
-
-    show interface-route-table [name]
-
-    delete interface-route-table <name>
-
-    add vm-interface <VM>:<network>
-        --interface-route-table <name>
-        --security-group <name>
-        --floating-ip-pool <tenant>:<network>:<pool>
-        --floating-ip any | <IP>
-
-    show vm-interface <VM>:<network>
-
-    delete vm-interface <VM>:<network>
-        --interface-route-table <name>
-        --security-group <name>
-        --floating-ip
-
-    add route-table <name>
-        --route <prefix/length:next-hop>
-
-    show route-table [name]
-
-    delete route-table <name>
-        --route <prefix/length:next-hop>
-
-    add service-template <name>
-        --mode [ transparent | in-network | in-network-nat ]
-        --type [ firewall | analyzer ]
-        --image <name>
-        --flavor <name>
-        --scale
-        --interface-type [ management | left | right | other ]
-
-    show service-template [name]
-
-    delete service-template <name>
-
-    add service-instance <name>
-        --template <name>
-        --management-network <name>
-        --left-network <name>
-        --left-route <prefix/length>
-        --right-network <name>
-        --right-route <prefix/length>
-        --scale-max <number>
-        --auto-policy
-
-    show service-instance [name]
-
-    delete service-instance <name>
-
-    add link-local <name>
-        --link-local-address <link local address>:<link local port>
-        --fabric-address '<fabric address>:<fabric port>'
-
-    show link-local [name]
-
-    delete link-local <name>
-
-    show image
-
-    show flavor
-
-## Examples
-
-### Create virtual networks and VMs.
-
-    # config add ipam ipam-default
-    # config add policy policy-default
-    # config add network front-end --ipam ipam-default --subnet 192.168.1.0/24 --policy policy-default
-    # config add network back-end --ipam ipam-default --subnet 192.168.1.0/24 --policy policy-default
-    # config add vm server --image "CentOS 6.4 1-6" --flavor m1.small --network front-end
-    # config add vm database --image "CentOS 6.4 1-6" --flavor m1.small --network back-end
-
-### Allocate floating IP to VM interface.
-
-    # config add network public --ipam ipam-default --subnet 10.8.10.0/24 --route-target 64512:10000
-    # config add floating-ip-pool public-pool --network public
-    # config add vm-interface server:front-end --floating-ip any --floating-ip-pool public-pool
-
-### Create layer-3 service template and service instance.
-
-    # config add service-template vsrx-l3 --mode in-network --type firewall --image vsrx-12.1x47 --flavor m1.medium --interface-type management --interface-type left --interface-type right
-    # config add service-instance vsrx-l3 --template vsrx-l3 --management-network management --left-network front-end --right-network back-end
-    # config add policy vsrx-l3 --src-net front-end --dst-net back-end --action service --service vsrx-l3
-    # config add network front-end --policy vsrx-l3
-    # config add network back-end --policy vsrx-l3
-
-### Create layer-2 service template and service instance.
-
-    # config add service-template vsrx-l2 --mode transparent --type firewall --image vsrx-12.1x47 --flavor m1.medium --interface-type management --interface-type left --interface-type right
-    # config add service-instance vsrx-l2 --template vsrx-l2 --management-network management
-
diff --git a/Testcases/openstackrc b/Testcases/openstackrc
deleted file mode 100644
index 4253819..0000000
--- a/Testcases/openstackrc
+++ /dev/null
@@ -1,6 +0,0 @@
-export OS_PASSWORD=contrail123
-export OS_AUTH_URL=http://10.10.11.16:5000/v2.0/
-export OS_USERNAME=admin
-export OS_TENANT_NAME=user9
-export OS_NO_CACHE=1
-
diff --git a/Testcases/openstackrc.int b/Testcases/openstackrc.int
deleted file mode 100644
index 9037445..0000000
--- a/Testcases/openstackrc.int
+++ /dev/null
@@ -1,6 +0,0 @@
-export OS_PASSWORD=password
-export OS_AUTH_URL=http://172.16.50.143:5000/v2.0/
-export OS_USERNAME=admin
-export OS_TENANT_NAME=admin
-export OS_NO_CACHE=1
-
diff --git a/Testcases/vnc_api/__init__.py b/Testcases/vnc_api/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/Testcases/vnc_api/__init__.py
+++ /dev/null
diff --git a/Testcases/vnc_api/__init__.pyc b/Testcases/vnc_api/__init__.pyc
deleted file mode 100644
index 94501b0..0000000
--- a/Testcases/vnc_api/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/common/__init__.py b/Testcases/vnc_api/common/__init__.py
deleted file mode 100644
index feaf215..0000000
--- a/Testcases/vnc_api/common/__init__.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-
-import sys
-import re
-
-IP_FABRIC_VN_FQ_NAME = ['default-domain', 'default-project', 'ip-fabric']
-IP_FABRIC_RI_FQ_NAME = IP_FABRIC_VN_FQ_NAME + ['__default__']
-LINK_LOCAL_VN_FQ_NAME = ['default-domain', 'default-project', '__link_local__']
-LINK_LOCAL_RI_FQ_NAME = LINK_LOCAL_VN_FQ_NAME + ['__link_local__']
-SG_NO_RULE_NAME = "__no_rule__"
-SG_NO_RULE_FQ_NAME = ['default-domain', 'default-project', SG_NO_RULE_NAME]
-
-BGP_RTGT_MIN_ID = 8000000
-SGID_MIN_ALLOC = 8000000
-
-def obj_to_json(obj):
- return dict((k, v) for k, v in obj.__dict__.iteritems())
-#end obj_to_json
-
-def json_to_obj(obj):
- pass
-#end json_to_obj
-
-def ignore_exceptions(func):
- def wrapper(*args, **kwargs):
- try:
- return func(*args, **kwargs)
- except Exception as e:
- return None
- return wrapper
-# end ignore_exceptions
-
-_illegal_unichrs = [(0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x1F),
- (0x7F, 0x84), (0x86, 0x9F),
- (0xFDD0, 0xFDDF), (0xFFFE, 0xFFFF)]
-if sys.maxunicode >= 0x10000: # not narrow build
- _illegal_unichrs.extend([(0x1FFFE, 0x1FFFF), (0x2FFFE, 0x2FFFF),
- (0x3FFFE, 0x3FFFF), (0x4FFFE, 0x4FFFF),
- (0x5FFFE, 0x5FFFF), (0x6FFFE, 0x6FFFF),
- (0x7FFFE, 0x7FFFF), (0x8FFFE, 0x8FFFF),
- (0x9FFFE, 0x9FFFF), (0xAFFFE, 0xAFFFF),
- (0xBFFFE, 0xBFFFF), (0xCFFFE, 0xCFFFF),
- (0xDFFFE, 0xDFFFF), (0xEFFFE, 0xEFFFF),
- (0xFFFFE, 0xFFFFF), (0x10FFFE, 0x10FFFF)])
-
-_illegal_ranges = ["%s-%s" % (unichr(low), unichr(high))
- for (low, high) in _illegal_unichrs]
-illegal_xml_chars_RE = re.compile(u'[%s]' % u''.join(_illegal_ranges))
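Two short usage sketches for the helpers defined above; `lookup` and `sanitize_for_xml` are hypothetical names used only for illustration.

```python
# Sketch: both helpers come from vnc_api.common (the module above).
from vnc_api.common import ignore_exceptions, illegal_xml_chars_RE

@ignore_exceptions
def lookup(mapping, key):
    # Any exception (e.g. KeyError) is swallowed by the decorator above
    # and the call simply returns None.
    return mapping[key]

def sanitize_for_xml(text):
    # Drop characters that are not legal in XML 1.0 before serializing.
    return illegal_xml_chars_RE.sub(u'', text)
```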
diff --git a/Testcases/vnc_api/common/__init__.pyc b/Testcases/vnc_api/common/__init__.pyc
deleted file mode 100644
index a485c8c..0000000
--- a/Testcases/vnc_api/common/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/common/exceptions.py b/Testcases/vnc_api/common/exceptions.py
deleted file mode 100644
index d9723a4..0000000
--- a/Testcases/vnc_api/common/exceptions.py
+++ /dev/null
@@ -1,133 +0,0 @@
-#
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-# Base class of all exceptions in VNC
-
-
-class VncError(Exception):
- pass
-# end class VncError
-
-class ServiceUnavailableError(VncError):
- def __init__(self, code):
- self._reason_code = code
- # end __init__
-
- def __str__(self):
- return 'Service unavailable time out due to: %s' % (str(self._reason_code))
- # end __str__
-# end class ServiceUnavailableError
-
-class TimeOutError(VncError):
- def __init__(self, code):
- self._reason_code = code
- # end __init__
-
- def __str__(self):
- return 'Timed out due to: %s' % (str(self._reason_code))
- # end __str__
-# end class TimeOutError
-
-
-class BadRequest(Exception):
- def __init__(self, status_code, content):
- self.status_code = status_code
- self.content = content
- # end __init__
-
- def __str__(self):
- return 'HTTP Status: %s Content: %s' % (self.status_code, self.content)
- # end __str__
-# end class BadRequest
-
-
-class NoIdError(VncError):
-
- def __init__(self, unknown_id):
- self._unknown_id = unknown_id
- # end __init__
-
- def __str__(self):
- return 'Unknown id: %s' % (self._unknown_id)
- # end __str__
-# end class NoIdError
-
-
-class MaxRabbitPendingError(VncError):
-
- def __init__(self, npending):
- self._npending = npending
- # end __init__
-
- def __str__(self):
- return 'Too many pending updates to RabbitMQ: %s' % (self._npending)
- # end __str__
-# end class MaxRabbitPendingError
-
-class ResourceExistsError(VncError):
- def __init__(self, eexists_fq_name, eexists_id):
- self._eexists_fq_name = eexists_fq_name
- self._eexists_id = eexists_id
- # end __init__
-
- def __str__(self):
- return 'FQ Name: %s exists already with ID: %s' \
- % (self._eexists_fq_name, self._eexists_id)
- # end __str__
-# end class ResourceExistsError
-
-class ResourceTypeUnknownError(VncError):
- def __init__(self, obj_type):
- self._unknown_type = obj_type
- # end __init__
-
- def __str__(self):
- return 'Unknown object type: %s' %(self._unknown_type)
- # end __str__
-# end class ResourceTypeUnknownError
-
-class PermissionDenied(VncError):
- pass
-# end class PermissionDenied
-
-
-class RefsExistError(VncError):
- pass
-# end class RefsExistError
-
-
-class ResourceExhaustionError(VncError):
- pass
-# end class ResourceExhaustionError
-
-
-class NoUserAgentKey(VncError):
- pass
-# end class NoUserAgentKey
-
-
-class UnknownAuthMethod(VncError):
- pass
-# end class UnknownAuthMethod
-
-
-class HttpError(VncError):
-
- def __init__(self, status_code, content):
- self.status_code = status_code
- self.content = content
- # end __init__
-
- def __str__(self):
- return 'HTTP Status: %s Content: %s' % (self.status_code, self.content)
- # end __str__
-# end class HttpError
-
-
-class AmbiguousParentError(VncError):
- pass
-
-
-class InvalidSessionID(VncError):
- pass
-# end InvalidSessionID
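A hedged sketch of how callers might branch on these exception types. `vnc_lib.virtual_network_read` stands in for any API call that can raise them; treat the call and its keyword as an assumption, not a documented contract.

```python
# Sketch: distinguish "object not found" from a transport/HTTP failure.
from vnc_api.common.exceptions import NoIdError, HttpError

def read_network_or_none(vnc_lib, fq_name):
    # vnc_lib and virtual_network_read are placeholders for an API client
    # call that raises the exceptions defined above.
    try:
        return vnc_lib.virtual_network_read(fq_name=fq_name)
    except NoIdError:
        return None                      # unknown id / fq_name
    except HttpError as e:
        print 'API error: %s' % str(e)   # __str__ above formats status + body
        raise
```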
diff --git a/Testcases/vnc_api/common/exceptions.pyc b/Testcases/vnc_api/common/exceptions.pyc
deleted file mode 100644
index 972c8fb..0000000
--- a/Testcases/vnc_api/common/exceptions.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/common/rest.py b/Testcases/vnc_api/common/rest.py
deleted file mode 100644
index 7287f8d..0000000
--- a/Testcases/vnc_api/common/rest.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-OP_POST = 1
-OP_GET = 2
-OP_PUT = 3
-OP_DELETE = 4
-
-
-def hdr_client_tenant():
- return 'X-Tenant-Name'
-# end hdr_tenant_client
-
-# TODO transform from client value
-
-
-def hdr_server_tenant():
- return 'HTTP_X_TENANT_NAME'
-# end hdr_tenant_server
-
-
-class LinkObject(object):
-
- def __init__(self, rel, base_url, uri, name):
- self.rel = rel
- self.base_url = base_url
- self.uri = uri
- self.name = name
- # end __init__
-
- def to_dict(self, with_url=None):
- if not with_url:
- url = self.base_url
- else:
- url = with_url
- return {'rel': self.rel,
- 'href': url + self.uri,
- 'name': self.name}
- # end to_dict
-
-# end class LinkObject
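A small sketch of how `LinkObject` is typically rendered; the base URL and names are placeholder values.

```python
# Sketch: build a link and render it the way REST responses embed it.
from vnc_api.common.rest import LinkObject

link = LinkObject('collection', 'http://127.0.0.1:8082',
                  '/virtual-networks', 'virtual-network')
print link.to_dict()
# -> {'rel': 'collection',
#     'href': 'http://127.0.0.1:8082/virtual-networks',
#     'name': 'virtual-network'}
```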
diff --git a/Testcases/vnc_api/common/rest.pyc b/Testcases/vnc_api/common/rest.pyc
deleted file mode 100644
index ed5532d..0000000
--- a/Testcases/vnc_api/common/rest.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/gen/__init__.py b/Testcases/vnc_api/gen/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/Testcases/vnc_api/gen/__init__.py
+++ /dev/null
diff --git a/Testcases/vnc_api/gen/__init__.pyc b/Testcases/vnc_api/gen/__init__.pyc
deleted file mode 100644
index 48fbda3..0000000
--- a/Testcases/vnc_api/gen/__init__.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/gen/cfixture.py b/Testcases/vnc_api/gen/cfixture.py
deleted file mode 100644
index 6e31f2a..0000000
--- a/Testcases/vnc_api/gen/cfixture.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-import fixtures
-
-
-class ConrtailLink (object):
-
- def __init__(self, name, _from, _to, _type, fixtr):
- self._type = _type
- self._fixture = fixtr
- self._from = _from
- self._to = _to
- self._name = name
-
- def fixture(self):
- return self._fixture
-
-
-class ContrailFixture (fixtures.Fixture):
-
- def __init__(self):
- self._pdetails = {}
-
- def _get_link_dict(self):
- if '__links__' not in self._pdetails:
- self._pdetails['__links__'] = {}
- return self._pdetails['__links__']
-
- def _update_link_dict(self, lname):
- self._pdetails['__links__'][lname] = []
-
- def links(self):
- return self._get_link_dict().keys()
-
- def get_links(self, lname):
- return self._get_link_dict().get(lname, [])
-
- def add_link(self, lname, link):
- if not self.get_links(lname):
- self._update_link_dict(lname)
-
- return self.get_links(lname).append(link)
-
- def get_link_fixtures(self, lname):
- return map(lambda l: l.fixture(), self.get_links(lname))
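A brief sketch of the link bookkeeping above; the link name and endpoints are hypothetical, and the second fixture only plays the role of a linked peer.

```python
# Sketch: record one link on a fixture and read it back
# (ConrtailLink is spelled exactly as defined above).
from vnc_api.gen.cfixture import ConrtailLink, ContrailFixture

parent = ContrailFixture()
peer = ContrailFixture()
link = ConrtailLink('vn-to-ipam', 'vn1', 'ipam1', 'ref', peer)
parent.add_link('vn-to-ipam', link)
assert parent.links() == ['vn-to-ipam']
assert parent.get_link_fixtures('vn-to-ipam') == [peer]
```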
diff --git a/Testcases/vnc_api/gen/cfixture.pyc b/Testcases/vnc_api/gen/cfixture.pyc
deleted file mode 100644
index a6310df..0000000
--- a/Testcases/vnc_api/gen/cfixture.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/gen/connection_drv_gen.py b/Testcases/vnc_api/gen/connection_drv_gen.py
deleted file mode 100644
index aff1f49..0000000
--- a/Testcases/vnc_api/gen/connection_drv_gen.py
+++ /dev/null
@@ -1,2529 +0,0 @@
-
-# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit!
-
-import abc
-
-class ConnectionDriverBase(object):
- """
- This class provides type specific methods to create,
- read, update, delete and list objects from the server
- """
-
- __metaclass__ = abc.ABCMeta
-
- @abc.abstractmethod
- def __init__(self):
- pass
- #end __init__
- def domain_create(self, obj):
- """Create new domain.
-
- :param obj: :class:`.Domain` object
-
- """
- raise NotImplementedError, 'domain_create is %s\'s responsibility' % (str(type (self)))
- #end domain_create
-
- def domain_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return domain information.
-
- :param fq_name: Fully qualified name of domain
- :param fq_name_str: Fully qualified name string of domain
- :param id: UUID of domain
- :param ifmap_id: IFMAP id of domain
- :returns: :class:`.Domain` object
-
- """
- raise NotImplementedError, 'domain_read is %s\'s responsibility' % (str(type (self)))
- #end domain_read
-
- def domain_update(self, obj):
- """Update domain.
-
- :param obj: :class:`.Domain` object
-
- """
- raise NotImplementedError, 'domain_update is %s\'s responsibility' % (str(type (self)))
- #end domain_update
-
- def domains_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all domains.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: full qualified name of parent as optional search filter
- :returns: list of :class:`.Domain` objects
-
- """
- raise NotImplementedError, 'domains_list is %s\'s responsibility' % (str(type (self)))
- #end domains_list
-
- def domain_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete domain from the system.
-
- :param fq_name: Fully qualified name of domain
- :param id: UUID of domain
- :param ifmap_id: IFMAP id of domain
-
- """
- raise NotImplementedError, 'domain_delete is %s\'s responsibility' % (str(type (self)))
- #end domain_delete
-
- def get_default_domain_id(self):
- """Return UUID of default domain."""
- raise NotImplementedError, 'get_default_domain_delete is %s\'s responsibility' % (str(type (self)))
- #end get_default_domain_delete
-
- def global_vrouter_config_create(self, obj):
- """Create new global-vrouter-config.
-
- :param obj: :class:`.GlobalVrouterConfig` object
-
- """
- raise NotImplementedError, 'global_vrouter_config_create is %s\'s responsibility' % (str(type (self)))
- #end global_vrouter_config_create
-
- def global_vrouter_config_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return global-vrouter-config information.
-
- :param fq_name: Fully qualified name of global-vrouter-config
- :param fq_name_str: Fully qualified name string of global-vrouter-config
- :param id: UUID of global-vrouter-config
- :param ifmap_id: IFMAP id of global-vrouter-config
- :returns: :class:`.GlobalVrouterConfig` object
-
- """
- raise NotImplementedError, 'global_vrouter_config_read is %s\'s responsibility' % (str(type (self)))
- #end global_vrouter_config_read
-
- def global_vrouter_config_update(self, obj):
- """Update global-vrouter-config.
-
- :param obj: :class:`.GlobalVrouterConfig` object
-
- """
- raise NotImplementedError, 'global_vrouter_config_update is %s\'s responsibility' % (str(type (self)))
- #end global_vrouter_config_update
-
- def global_vrouter_configs_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all global-vrouter-configs.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: full qualified name of parent as optional search filter
- :returns: list of :class:`.GlobalVrouterConfig` objects
-
- """
- raise NotImplementedError, 'global_vrouter_configs_list is %s\'s responsibility' % (str(type (self)))
- #end global_vrouter_configs_list
-
- def global_vrouter_config_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete global-vrouter-config from the system.
-
- :param fq_name: Fully qualified name of global-vrouter-config
- :param id: UUID of global-vrouter-config
- :param ifmap_id: IFMAP id of global-vrouter-config
-
- """
- raise NotImplementedError, 'global_vrouter_config_delete is %s\'s responsibility' % (str(type (self)))
- #end global_vrouter_config_delete
-
- def get_default_global_vrouter_config_id(self):
- """Return UUID of default global-vrouter-config."""
- raise NotImplementedError, 'get_default_global_vrouter_config_delete is %s\'s responsibility' % (str(type (self)))
- #end get_default_global_vrouter_config_delete
-
- def instance_ip_create(self, obj):
- """Create new instance-ip.
-
- :param obj: :class:`.InstanceIp` object
-
- """
- raise NotImplementedError, 'instance_ip_create is %s\'s responsibility' % (str(type (self)))
- #end instance_ip_create
-
- def instance_ip_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return instance-ip information.
-
- :param fq_name: Fully qualified name of instance-ip
- :param fq_name_str: Fully qualified name string of instance-ip
- :param id: UUID of instance-ip
- :param ifmap_id: IFMAP id of instance-ip
- :returns: :class:`.InstanceIp` object
-
- """
- raise NotImplementedError, 'instance_ip_read is %s\'s responsibility' % (str(type (self)))
- #end instance_ip_read
-
- def instance_ip_update(self, obj):
- """Update instance-ip.
-
- :param obj: :class:`.InstanceIp` object
-
- """
- raise NotImplementedError, 'instance_ip_update is %s\'s responsibility' % (str(type (self)))
- #end instance_ip_update
-
- def instance_ips_list(self, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all instance-ips."""
- raise NotImplementedError, 'instance_ips_list is %s\'s responsibility' % (str(type (self)))
- #end instance_ips_list
-
- def instance_ip_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete instance-ip from the system.
-
- :param fq_name: Fully qualified name of instance-ip
- :param id: UUID of instance-ip
- :param ifmap_id: IFMAP id of instance-ip
-
- """
- raise NotImplementedError, 'instance_ip_delete is %s\'s responsibility' % (str(type (self)))
- #end instance_ip_delete
-
- def get_default_instance_ip_id(self):
- """Return UUID of default instance-ip."""
- raise NotImplementedError, 'get_default_instance_ip_delete is %s\'s responsibility' % (str(type (self)))
- #end get_default_instance_ip_delete
-
- def network_policy_create(self, obj):
- """Create new network-policy.
-
- :param obj: :class:`.NetworkPolicy` object
-
- """
- raise NotImplementedError, 'network_policy_create is %s\'s responsibility' % (str(type (self)))
- #end network_policy_create
-
- def network_policy_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return network-policy information.
-
- :param fq_name: Fully qualified name of network-policy
- :param fq_name_str: Fully qualified name string of network-policy
- :param id: UUID of network-policy
- :param ifmap_id: IFMAP id of network-policy
- :returns: :class:`.NetworkPolicy` object
-
- """
- raise NotImplementedError, 'network_policy_read is %s\'s responsibility' % (str(type (self)))
- #end network_policy_read
-
- def network_policy_update(self, obj):
- """Update network-policy.
-
- :param obj: :class:`.NetworkPolicy` object
-
- """
- raise NotImplementedError, 'network_policy_update is %s\'s responsibility' % (str(type (self)))
- #end network_policy_update
-
- def network_policys_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all network-policys.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: full qualified name of parent as optional search filter
- :returns: list of :class:`.NetworkPolicy` objects
-
- """
- raise NotImplementedError, 'network_policys_list is %s\'s responsibility' % (str(type (self)))
- #end network_policys_list
-
- def network_policy_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete network-policy from the system.
-
- :param fq_name: Fully qualified name of network-policy
- :param id: UUID of network-policy
- :param ifmap_id: IFMAP id of network-policy
-
- """
- raise NotImplementedError, 'network_policy_delete is %s\'s responsibility' % (str(type (self)))
- #end network_policy_delete
-
- def get_default_network_policy_id(self):
- """Return UUID of default network-policy."""
- raise NotImplementedError, 'get_default_network_policy_delete is %s\'s responsibility' % (str(type (self)))
- #end get_default_network_policy_delete
-
- def loadbalancer_pool_create(self, obj):
- """Create new loadbalancer-pool.
-
- :param obj: :class:`.LoadbalancerPool` object
-
- """
- raise NotImplementedError, 'loadbalancer_pool_create is %s\'s responsibility' % (str(type (self)))
- #end loadbalancer_pool_create
-
- def loadbalancer_pool_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return loadbalancer-pool information.
-
- :param fq_name: Fully qualified name of loadbalancer-pool
- :param fq_name_str: Fully qualified name string of loadbalancer-pool
- :param id: UUID of loadbalancer-pool
- :param ifmap_id: IFMAP id of loadbalancer-pool
- :returns: :class:`.LoadbalancerPool` object
-
- """
- raise NotImplementedError, 'loadbalancer_pool_read is %s\'s responsibility' % (str(type (self)))
- #end loadbalancer_pool_read
-
- def loadbalancer_pool_update(self, obj):
- """Update loadbalancer-pool.
-
- :param obj: :class:`.LoadbalancerPool` object
-
- """
- raise NotImplementedError, 'loadbalancer_pool_update is %s\'s responsibility' % (str(type (self)))
- #end loadbalancer_pool_update
-
- def loadbalancer_pools_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all loadbalancer-pools.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: full qualified name of parent as optional search filter
- :returns: list of :class:`.LoadbalancerPool` objects
-
- """
- raise NotImplementedError, 'loadbalancer_pools_list is %s\'s responsibility' % (str(type (self)))
- #end loadbalancer_pools_list
-
- def loadbalancer_pool_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete loadbalancer-pool from the system.
-
- :param fq_name: Fully qualified name of loadbalancer-pool
- :param id: UUID of loadbalancer-pool
- :param ifmap_id: IFMAP id of loadbalancer-pool
-
- """
- raise NotImplementedError, 'loadbalancer_pool_delete is %s\'s responsibility' % (str(type (self)))
- #end loadbalancer_pool_delete
-
- def get_default_loadbalancer_pool_id(self):
- """Return UUID of default loadbalancer-pool."""
- raise NotImplementedError, 'get_default_loadbalancer_pool_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_loadbalancer_pool_id
-
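The *_list stubs above all take the same optional filters: parent_id or parent_fq_name scope the search to a parent, obj_uuids selects specific objects, fields limits the returned attributes, and detail/count presumably switch between summary, full-object and count-only results (the parameter names suggest this; the docstrings do not spell it out). A hedged usage sketch against a hypothetical concrete driver instance:

# 'conn' is assumed to be an instance of a concrete subclass that implements
# the loadbalancer_pools_list stub declared above; the project fq_name passed
# in by the caller is likewise hypothetical.
def survey_pools(conn, project_fq_name):
    summaries = conn.loadbalancer_pools_list(parent_fq_name=project_fq_name)
    full_objs = conn.loadbalancer_pools_list(parent_fq_name=project_fq_name,
                                             detail=True)   # full objects, not summaries
    total = conn.loadbalancer_pools_list(count=True)         # count-only form
    return summaries, full_objs, total
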
- def virtual_DNS_record_create(self, obj):
- """Create new virtual-DNS-record.
-
- :param obj: :class:`.VirtualDnsRecord` object
-
- """
- raise NotImplementedError, 'virtual_DNS_record_create is %s\'s responsibility' % (str(type (self)))
- #end virtual_DNS_record_create
-
- def virtual_DNS_record_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return virtual-DNS-record information.
-
- :param fq_name: Fully qualified name of virtual-DNS-record
- :param fq_name_str: Fully qualified name string of virtual-DNS-record
- :param id: UUID of virtual-DNS-record
- :param ifmap_id: IFMAP id of virtual-DNS-record
- :returns: :class:`.VirtualDnsRecord` object
-
- """
- raise NotImplementedError, 'virtual_DNS_record_read is %s\'s responsibility' % (str(type (self)))
- #end virtual_DNS_record_read
-
- def virtual_DNS_record_update(self, obj):
- """Update virtual-DNS-record.
-
- :param obj: :class:`.VirtualDnsRecord` object
-
- """
- raise NotImplementedError, 'virtual_DNS_record_update is %s\'s responsibility' % (str(type (self)))
- #end virtual_DNS_record_update
-
- def virtual_DNS_records_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all virtual-DNS-records.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.VirtualDnsRecord` objects
-
- """
- raise NotImplementedError, 'virtual_DNS_records_list is %s\'s responsibility' % (str(type (self)))
- #end virtual_DNS_records_list
-
- def virtual_DNS_record_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete virtual-DNS-record from the system.
-
- :param fq_name: Fully qualified name of virtual-DNS-record
- :param id: UUID of virtual-DNS-record
- :param ifmap_id: IFMAP id of virtual-DNS-record
-
- """
- raise NotImplementedError, 'virtual_DNS_record_delete is %s\'s responsibility' % (str(type (self)))
- #end virtual_DNS_record_delete
-
- def get_default_virtual_DNS_record_id(self):
- """Return UUID of default virtual-DNS-record."""
- raise NotImplementedError, 'get_default_virtual_DNS_record_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_virtual_DNS_record_id
-
- def route_target_create(self, obj):
- """Create new route-target.
-
- :param obj: :class:`.RouteTarget` object
-
- """
- raise NotImplementedError, 'route_target_create is %s\'s responsibility' % (str(type (self)))
- #end route_target_create
-
- def route_target_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return route-target information.
-
- :param fq_name: Fully qualified name of route-target
- :param fq_name_str: Fully qualified name string of route-target
- :param id: UUID of route-target
- :param ifmap_id: IFMAP id of route-target
- :returns: :class:`.RouteTarget` object
-
- """
- raise NotImplementedError, 'route_target_read is %s\'s responsibility' % (str(type (self)))
- #end route_target_read
-
- def route_target_update(self, obj):
- """Update route-target.
-
- :param obj: :class:`.RouteTarget` object
-
- """
- raise NotImplementedError, 'route_target_update is %s\'s responsibility' % (str(type (self)))
- #end route_target_update
-
- def route_targets_list(self, obj_uuids = None, fields = None, detail = False, count = False):
- """List all route-targets."""
- raise NotImplementedError, 'route_targets_list is %s\'s responsibility' % (str(type (self)))
- #end route_targets_list
-
- def route_target_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete route-target from the system.
-
- :param fq_name: Fully qualified name of route-target
- :param id: UUID of route-target
- :param ifmap_id: IFMAP id of route-target
-
- """
- raise NotImplementedError, 'route_target_delete is %s\'s responsibility' % (str(type (self)))
- #end route_target_delete
-
- def get_default_route_target_id(self):
- """Return UUID of default route-target."""
- raise NotImplementedError, 'get_default_route_target_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_route_target_id
-
- def floating_ip_create(self, obj):
- """Create new floating-ip.
-
- :param obj: :class:`.FloatingIp` object
-
- """
- raise NotImplementedError, 'floating_ip_create is %s\'s responsibility' % (str(type (self)))
- #end floating_ip_create
-
- def floating_ip_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return floating-ip information.
-
- :param fq_name: Fully qualified name of floating-ip
- :param fq_name_str: Fully qualified name string of floating-ip
- :param id: UUID of floating-ip
- :param ifmap_id: IFMAP id of floating-ip
- :returns: :class:`.FloatingIp` object
-
- """
- raise NotImplementedError, 'floating_ip_read is %s\'s responsibility' % (str(type (self)))
- #end floating_ip_read
-
- def floating_ip_update(self, obj):
- """Update floating-ip.
-
- :param obj: :class:`.FloatingIp` object
-
- """
- raise NotImplementedError, 'floating_ip_update is %s\'s responsibility' % (str(type (self)))
- #end floating_ip_update
-
- def floating_ips_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all floating-ips.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.FloatingIp` objects
-
- """
- raise NotImplementedError, 'floating_ips_list is %s\'s responsibility' % (str(type (self)))
- #end floating_ips_list
-
- def floating_ip_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete floating-ip from the system.
-
- :param fq_name: Fully qualified name of floating-ip
- :param id: UUID of floating-ip
- :param ifmap_id: IFMAP id of floating-ip
-
- """
- raise NotImplementedError, 'floating_ip_delete is %s\'s responsibility' % (str(type (self)))
- #end floating_ip_delete
-
- def get_default_floating_ip_id(self):
- """Return UUID of default floating-ip."""
- raise NotImplementedError, 'get_default_floating_ip_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_floating_ip_id
-
- def floating_ip_pool_create(self, obj):
- """Create new floating-ip-pool.
-
- :param obj: :class:`.FloatingIpPool` object
-
- """
- raise NotImplementedError, 'floating_ip_pool_create is %s\'s responsibility' % (str(type (self)))
- #end floating_ip_pool_create
-
- def floating_ip_pool_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return floating-ip-pool information.
-
- :param fq_name: Fully qualified name of floating-ip-pool
- :param fq_name_str: Fully qualified name string of floating-ip-pool
- :param id: UUID of floating-ip-pool
- :param ifmap_id: IFMAP id of floating-ip-pool
- :returns: :class:`.FloatingIpPool` object
-
- """
- raise NotImplementedError, 'floating_ip_pool_read is %s\'s responsibility' % (str(type (self)))
- #end floating_ip_pool_read
-
- def floating_ip_pool_update(self, obj):
- """Update floating-ip-pool.
-
- :param obj: :class:`.FloatingIpPool` object
-
- """
- raise NotImplementedError, 'floating_ip_pool_update is %s\'s responsibility' % (str(type (self)))
- #end floating_ip_pool_update
-
- def floating_ip_pools_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all floating-ip-pools.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.FloatingIpPool` objects
-
- """
- raise NotImplementedError, 'floating_ip_pools_list is %s\'s responsibility' % (str(type (self)))
- #end floating_ip_pools_list
-
- def floating_ip_pool_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete floating-ip-pool from the system.
-
- :param fq_name: Fully qualified name of floating-ip-pool
- :param id: UUID of floating-ip-pool
- :param ifmap_id: IFMAP id of floating-ip-pool
-
- """
- raise NotImplementedError, 'floating_ip_pool_delete is %s\'s responsibility' % (str(type (self)))
- #end floating_ip_pool_delete
-
- def get_default_floating_ip_pool_id(self):
- """Return UUID of default floating-ip-pool."""
- raise NotImplementedError, 'get_default_floating_ip_pool_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_floating_ip_pool_id
-
- def physical_router_create(self, obj):
- """Create new physical-router.
-
- :param obj: :class:`.PhysicalRouter` object
-
- """
- raise NotImplementedError, 'physical_router_create is %s\'s responsibility' % (str(type (self)))
- #end physical_router_create
-
- def physical_router_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return physical-router information.
-
- :param fq_name: Fully qualified name of physical-router
- :param fq_name_str: Fully qualified name string of physical-router
- :param id: UUID of physical-router
- :param ifmap_id: IFMAP id of physical-router
- :returns: :class:`.PhysicalRouter` object
-
- """
- raise NotImplementedError, 'physical_router_read is %s\'s responsibility' % (str(type (self)))
- #end physical_router_read
-
- def physical_router_update(self, obj):
- """Update physical-router.
-
- :param obj: :class:`.PhysicalRouter` object
-
- """
- raise NotImplementedError, 'physical_router_update is %s\'s responsibility' % (str(type (self)))
- #end physical_router_update
-
- def physical_routers_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all physical-routers.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.PhysicalRouter` objects
-
- """
- raise NotImplementedError, 'physical_routers_list is %s\'s responsibility' % (str(type (self)))
- #end physical_routers_list
-
- def physical_router_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete physical-router from the system.
-
- :param fq_name: Fully qualified name of physical-router
- :param id: UUID of physical-router
- :param ifmap_id: IFMAP id of physical-router
-
- """
- raise NotImplementedError, 'physical_router_delete is %s\'s responsibility' % (str(type (self)))
- #end physical_router_delete
-
- def get_default_physical_router_id(self):
- """Return UUID of default physical-router."""
- raise NotImplementedError, 'get_default_physical_router_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_physical_router_id
-
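Each *_read stub accepts alternative identifiers for the same object: a UUID, a fully qualified name list, its string form, or an IFMAP id. A sketch of the equivalent call shapes, assuming the hypothetical router name below and that fq_name_str is simply the colon-joined form of fq_name (a naming convention assumed here, not stated in this file):

# 'conn' is a hypothetical concrete driver; 'router-1' and router_uuid are invented.
def read_router_three_ways(conn, router_uuid):
    fq_name = ['default-global-system-config', 'router-1']
    by_list = conn.physical_router_read(fq_name=fq_name)
    by_str  = conn.physical_router_read(fq_name_str=':'.join(fq_name))
    by_uuid = conn.physical_router_read(id=router_uuid)
    return by_list, by_str, by_uuid
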
- def bgp_router_create(self, obj):
- """Create new bgp-router.
-
- :param obj: :class:`.BgpRouter` object
-
- """
- raise NotImplementedError, 'bgp_router_create is %s\'s responsibility' % (str(type (self)))
- #end bgp_router_create
-
- def bgp_router_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return bgp-router information.
-
- :param fq_name: Fully qualified name of bgp-router
- :param fq_name_str: Fully qualified name string of bgp-router
- :param id: UUID of bgp-router
- :param ifmap_id: IFMAP id of bgp-router
- :returns: :class:`.BgpRouter` object
-
- """
- raise NotImplementedError, 'bgp_router_read is %s\'s responsibility' % (str(type (self)))
- #end bgp_router_read
-
- def bgp_router_update(self, obj):
- """Update bgp-router.
-
- :param obj: :class:`.BgpRouter` object
-
- """
- raise NotImplementedError, 'bgp_router_update is %s\'s responsibility' % (str(type (self)))
- #end bgp_router_update
-
- def bgp_routers_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all bgp-routers.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.BgpRouter` objects
-
- """
- raise NotImplementedError, 'bgp_routers_list is %s\'s responsibility' % (str(type (self)))
- #end bgp_routers_list
-
- def bgp_router_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete bgp-router from the system.
-
- :param fq_name: Fully qualified name of bgp-router
- :param id: UUID of bgp-router
- :param ifmap_id: IFMAP id of bgp-router
-
- """
- raise NotImplementedError, 'bgp_router_delete is %s\'s responsibility' % (str(type (self)))
- #end bgp_router_delete
-
- def get_default_bgp_router_id(self):
- """Return UUID of default bgp-router."""
- raise NotImplementedError, 'get_default_bgp_router_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_bgp_router_id
-
- def virtual_router_create(self, obj):
- """Create new virtual-router.
-
- :param obj: :class:`.VirtualRouter` object
-
- """
- raise NotImplementedError, 'virtual_router_create is %s\'s responsibility' % (str(type (self)))
- #end virtual_router_create
-
- def virtual_router_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return virtual-router information.
-
- :param fq_name: Fully qualified name of virtual-router
- :param fq_name_str: Fully qualified name string of virtual-router
- :param id: UUID of virtual-router
- :param ifmap_id: IFMAP id of virtual-router
- :returns: :class:`.VirtualRouter` object
-
- """
- raise NotImplementedError, 'virtual_router_read is %s\'s responsibility' % (str(type (self)))
- #end virtual_router_read
-
- def virtual_router_update(self, obj):
- """Update virtual-router.
-
- :param obj: :class:`.VirtualRouter` object
-
- """
- raise NotImplementedError, 'virtual_router_update is %s\'s responsibility' % (str(type (self)))
- #end virtual_router_update
-
- def virtual_routers_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all virtual-routers.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.VirtualRouter` objects
-
- """
- raise NotImplementedError, 'virtual_routers_list is %s\'s responsibility' % (str(type (self)))
- #end virtual_routers_list
-
- def virtual_router_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete virtual-router from the system.
-
- :param fq_name: Fully qualified name of virtual-router
- :param id: UUID of virtual-router
- :param ifmap_id: IFMAP id of virtual-router
-
- """
- raise NotImplementedError, 'virtual_router_delete is %s\'s responsibility' % (str(type (self)))
- #end virtual_router_delete
-
- def get_default_virtual_router_id(self):
- """Return UUID of default virtual-router."""
- raise NotImplementedError, 'get_default_virtual_router_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_virtual_router_id
-
- def config_root_create(self, obj):
- """Create new config-root.
-
- :param obj: :class:`.ConfigRoot` object
-
- """
- raise NotImplementedError, 'config_root_create is %s\'s responsibility' % (str(type (self)))
- #end config_root_create
-
- def config_root_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return config-root information.
-
- :param fq_name: Fully qualified name of config-root
- :param fq_name_str: Fully qualified name string of config-root
- :param id: UUID of config-root
- :param ifmap_id: IFMAP id of config-root
- :returns: :class:`.ConfigRoot` object
-
- """
- raise NotImplementedError, 'config_root_read is %s\'s responsibility' % (str(type (self)))
- #end config_root_read
-
- def config_root_update(self, obj):
- """Update config-root.
-
- :param obj: :class:`.ConfigRoot` object
-
- """
- raise NotImplementedError, 'config_root_update is %s\'s responsibility' % (str(type (self)))
- #end config_root_update
-
- def config_roots_list(self, obj_uuids = None, fields = None, detail = False, count = False):
- """List all config-roots."""
- raise NotImplementedError, 'config_roots_list is %s\'s responsibility' % (str(type (self)))
- #end config_roots_list
-
- def config_root_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete config-root from the system.
-
- :param fq_name: Fully qualified name of config-root
- :param id: UUID of config-root
- :param ifmap_id: IFMAP id of config-root
-
- """
- raise NotImplementedError, 'config_root_delete is %s\'s responsibility' % (str(type (self)))
- #end config_root_delete
-
- def get_default_config_root_id(self):
- """Return UUID of default config-root."""
- raise NotImplementedError, 'get_default_config_root_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_config_root_id
-
- def subnet_create(self, obj):
- """Create new subnet.
-
- :param obj: :class:`.Subnet` object
-
- """
- raise NotImplementedError, 'subnet_create is %s\'s responsibility' % (str(type (self)))
- #end subnet_create
-
- def subnet_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return subnet information.
-
- :param fq_name: Fully qualified name of subnet
- :param fq_name_str: Fully qualified name string of subnet
- :param id: UUID of subnet
- :param ifmap_id: IFMAP id of subnet
- :returns: :class:`.Subnet` object
-
- """
- raise NotImplementedError, 'subnet_read is %s\'s responsibility' % (str(type (self)))
- #end subnet_read
-
- def subnet_update(self, obj):
- """Update subnet.
-
- :param obj: :class:`.Subnet` object
-
- """
- raise NotImplementedError, 'subnet_update is %s\'s responsibility' % (str(type (self)))
- #end subnet_update
-
- def subnets_list(self, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all subnets."""
- raise NotImplementedError, 'subnets_list is %s\'s responsibility' % (str(type (self)))
- #end subnets_list
-
- def subnet_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete subnet from the system.
-
- :param fq_name: Fully qualified name of subnet
- :param id: UUID of subnet
- :param ifmap_id: IFMAP id of subnet
-
- """
- raise NotImplementedError, 'subnet_delete is %s\'s responsibility' % (str(type (self)))
- #end subnet_delete
-
- def get_default_subnet_id(self):
- """Return UUID of default subnet."""
- raise NotImplementedError, 'get_default_subnet_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_subnet_id
-
- def global_system_config_create(self, obj):
- """Create new global-system-config.
-
- :param obj: :class:`.GlobalSystemConfig` object
-
- """
- raise NotImplementedError, 'global_system_config_create is %s\'s responsibility' % (str(type (self)))
- #end global_system_config_create
-
- def global_system_config_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return global-system-config information.
-
- :param fq_name: Fully qualified name of global-system-config
- :param fq_name_str: Fully qualified name string of global-system-config
- :param id: UUID of global-system-config
- :param ifmap_id: IFMAP id of global-system-config
- :returns: :class:`.GlobalSystemConfig` object
-
- """
- raise NotImplementedError, 'global_system_config_read is %s\'s responsibility' % (str(type (self)))
- #end global_system_config_read
-
- def global_system_config_update(self, obj):
- """Update global-system-config.
-
- :param obj: :class:`.GlobalSystemConfig` object
-
- """
- raise NotImplementedError, 'global_system_config_update is %s\'s responsibility' % (str(type (self)))
- #end global_system_config_update
-
- def global_system_configs_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all global-system-configs.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.GlobalSystemConfig` objects
-
- """
- raise NotImplementedError, 'global_system_configs_list is %s\'s responsibility' % (str(type (self)))
- #end global_system_configs_list
-
- def global_system_config_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete global-system-config from the system.
-
- :param fq_name: Fully qualified name of global-system-config
- :param id: UUID of global-system-config
- :param ifmap_id: IFMAP id of global-system-config
-
- """
- raise NotImplementedError, 'global_system_config_delete is %s\'s responsibility' % (str(type (self)))
- #end global_system_config_delete
-
- def get_default_global_system_config_id(self):
- """Return UUID of default global-system-config."""
- raise NotImplementedError, 'get_default_global_system_config_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_global_system_config_id
-
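The get_default_*_id hooks return the UUID of a well-known default object. A concrete driver would typically resolve that default's fully qualified name once and cache the result; a sketch with an assumed default name:

# Illustrative mix-in only: resolves the default global-system-config once and
# caches its UUID. The fq_name used here is an assumption about the deployment,
# not something stated in this file.
class CachedDefaultsMixin(object):
    _default_gsc_uuid = None

    def get_default_global_system_config_id(self):
        if self._default_gsc_uuid is None:
            gsc = self.global_system_config_read(
                fq_name=['default-global-system-config'])
            self._default_gsc_uuid = gsc.uuid
        return self._default_gsc_uuid
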
- def service_appliance_create(self, obj):
- """Create new service-appliance.
-
- :param obj: :class:`.ServiceAppliance` object
-
- """
- raise NotImplementedError, 'service_appliance_create is %s\'s responsibility' % (str(type (self)))
- #end service_appliance_create
-
- def service_appliance_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return service-appliance information.
-
- :param fq_name: Fully qualified name of service-appliance
- :param fq_name_str: Fully qualified name string of service-appliance
- :param id: UUID of service-appliance
- :param ifmap_id: IFMAP id of service-appliance
- :returns: :class:`.ServiceAppliance` object
-
- """
- raise NotImplementedError, 'service_appliance_read is %s\'s responsibility' % (str(type (self)))
- #end service_appliance_read
-
- def service_appliance_update(self, obj):
- """Update service-appliance.
-
- :param obj: :class:`.ServiceAppliance` object
-
- """
- raise NotImplementedError, 'service_appliance_update is %s\'s responsibility' % (str(type (self)))
- #end service_appliance_update
-
- def service_appliances_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all service-appliances.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.ServiceAppliance` objects
-
- """
- raise NotImplementedError, 'service_appliances_list is %s\'s responsibility' % (str(type (self)))
- #end service_appliances_list
-
- def service_appliance_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete service-appliance from the system.
-
- :param fq_name: Fully qualified name of service-appliance
- :param id: UUID of service-appliance
- :param ifmap_id: IFMAP id of service-appliance
-
- """
- raise NotImplementedError, 'service_appliance_delete is %s\'s responsibility' % (str(type (self)))
- #end service_appliance_delete
-
- def get_default_service_appliance_id(self):
- """Return UUID of default service-appliance."""
- raise NotImplementedError, 'get_default_service_appliance_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_service_appliance_id
-
- def service_instance_create(self, obj):
- """Create new service-instance.
-
- :param obj: :class:`.ServiceInstance` object
-
- """
- raise NotImplementedError, 'service_instance_create is %s\'s responsibility' % (str(type (self)))
- #end service_instance_create
-
- def service_instance_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return service-instance information.
-
- :param fq_name: Fully qualified name of service-instance
- :param fq_name_str: Fully qualified name string of service-instance
- :param id: UUID of service-instance
- :param ifmap_id: IFMAP id of service-instance
- :returns: :class:`.ServiceInstance` object
-
- """
- raise NotImplementedError, 'service_instance_read is %s\'s responsibility' % (str(type (self)))
- #end service_instance_read
-
- def service_instance_update(self, obj):
- """Update service-instance.
-
- :param obj: :class:`.ServiceInstance` object
-
- """
- raise NotImplementedError, 'service_instance_update is %s\'s responsibility' % (str(type (self)))
- #end service_instance_update
-
- def service_instances_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all service-instances.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.ServiceInstance` objects
-
- """
- raise NotImplementedError, 'service_instances_list is %s\'s responsibility' % (str(type (self)))
- #end service_instances_list
-
- def service_instance_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete service-instance from the system.
-
- :param fq_name: Fully qualified name of service-instance
- :param id: UUID of service-instance
- :param ifmap_id: IFMAP id of service-instance
-
- """
- raise NotImplementedError, 'service_instance_delete is %s\'s responsibility' % (str(type (self)))
- #end service_instance_delete
-
- def get_default_service_instance_id(self):
- """Return UUID of default service-instance."""
- raise NotImplementedError, 'get_default_service_instance_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_service_instance_id
-
- def namespace_create(self, obj):
- """Create new namespace.
-
- :param obj: :class:`.Namespace` object
-
- """
- raise NotImplementedError, 'namespace_create is %s\'s responsibility' % (str(type (self)))
- #end namespace_create
-
- def namespace_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return namespace information.
-
- :param fq_name: Fully qualified name of namespace
- :param fq_name_str: Fully qualified name string of namespace
- :param id: UUID of namespace
- :param ifmap_id: IFMAP id of namespace
- :returns: :class:`.Namespace` object
-
- """
- raise NotImplementedError, 'namespace_read is %s\'s responsibility' % (str(type (self)))
- #end namespace_read
-
- def namespace_update(self, obj):
- """Update namespace.
-
- :param obj: :class:`.Namespace` object
-
- """
- raise NotImplementedError, 'namespace_update is %s\'s responsibility' % (str(type (self)))
- #end namespace_update
-
- def namespaces_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all namespaces.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.Namespace` objects
-
- """
- raise NotImplementedError, 'namespaces_list is %s\'s responsibility' % (str(type (self)))
- #end namespaces_list
-
- def namespace_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete namespace from the system.
-
- :param fq_name: Fully qualified name of namespace
- :param id: UUID of namespace
- :param ifmap_id: IFMAP id of namespace
-
- """
- raise NotImplementedError, 'namespace_delete is %s\'s responsibility' % (str(type (self)))
- #end namespace_delete
-
- def get_default_namespace_id(self):
- """Return UUID of default namespace."""
- raise NotImplementedError, 'get_default_namespace_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_namespace_id
-
- def logical_interface_create(self, obj):
- """Create new logical-interface.
-
- :param obj: :class:`.LogicalInterface` object
-
- """
- raise NotImplementedError, 'logical_interface_create is %s\'s responsibility' % (str(type (self)))
- #end logical_interface_create
-
- def logical_interface_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return logical-interface information.
-
- :param fq_name: Fully qualified name of logical-interface
- :param fq_name_str: Fully qualified name string of logical-interface
- :param id: UUID of logical-interface
- :param ifmap_id: IFMAP id of logical-interface
- :returns: :class:`.LogicalInterface` object
-
- """
- raise NotImplementedError, 'logical_interface_read is %s\'s responsibility' % (str(type (self)))
- #end logical_interface_read
-
- def logical_interface_update(self, obj):
- """Update logical-interface.
-
- :param obj: :class:`.LogicalInterface` object
-
- """
- raise NotImplementedError, 'logical_interface_update is %s\'s responsibility' % (str(type (self)))
- #end logical_interface_update
-
- def logical_interfaces_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all logical-interfaces.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.LogicalInterface` objects
-
- """
- raise NotImplementedError, 'logical_interfaces_list is %s\'s responsibility' % (str(type (self)))
- #end logical_interfaces_list
-
- def logical_interface_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete logical-interface from the system.
-
- :param fq_name: Fully qualified name of logical-interface
- :param id: UUID of logical-interface
- :param ifmap_id: IFMAP id of logical-interface
-
- """
- raise NotImplementedError, 'logical_interface_delete is %s\'s responsibility' % (str(type (self)))
- #end logical_interface_delete
-
- def get_default_logical_interface_id(self):
- """Return UUID of default logical-interface."""
- raise NotImplementedError, 'get_default_logical_interface_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_logical_interface_id
-
- def route_table_create(self, obj):
- """Create new route-table.
-
- :param obj: :class:`.RouteTable` object
-
- """
- raise NotImplementedError, 'route_table_create is %s\'s responsibility' % (str(type (self)))
- #end route_table_create
-
- def route_table_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return route-table information.
-
- :param fq_name: Fully qualified name of route-table
- :param fq_name_str: Fully qualified name string of route-table
- :param id: UUID of route-table
- :param ifmap_id: IFMAP id of route-table
- :returns: :class:`.RouteTable` object
-
- """
- raise NotImplementedError, 'route_table_read is %s\'s responsibility' % (str(type (self)))
- #end route_table_read
-
- def route_table_update(self, obj):
- """Update route-table.
-
- :param obj: :class:`.RouteTable` object
-
- """
- raise NotImplementedError, 'route_table_update is %s\'s responsibility' % (str(type (self)))
- #end route_table_update
-
- def route_tables_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all route-tables.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.RouteTable` objects
-
- """
- raise NotImplementedError, 'route_tables_list is %s\'s responsibility' % (str(type (self)))
- #end route_tables_list
-
- def route_table_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete route-table from the system.
-
- :param fq_name: Fully qualified name of route-table
- :param id: UUID of route-table
- :param ifmap_id: IFMAP id of route-table
-
- """
- raise NotImplementedError, 'route_table_delete is %s\'s responsibility' % (str(type (self)))
- #end route_table_delete
-
- def get_default_route_table_id(self):
- """Return UUID of default route-table."""
- raise NotImplementedError, 'get_default_route_table_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_route_table_id
-
- def physical_interface_create(self, obj):
- """Create new physical-interface.
-
- :param obj: :class:`.PhysicalInterface` object
-
- """
- raise NotImplementedError, 'physical_interface_create is %s\'s responsibility' % (str(type (self)))
- #end physical_interface_create
-
- def physical_interface_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return physical-interface information.
-
- :param fq_name: Fully qualified name of physical-interface
- :param fq_name_str: Fully qualified name string of physical-interface
- :param id: UUID of physical-interface
- :param ifmap_id: IFMAP id of physical-interface
- :returns: :class:`.PhysicalInterface` object
-
- """
- raise NotImplementedError, 'physical_interface_read is %s\'s responsibility' % (str(type (self)))
- #end physical_interface_read
-
- def physical_interface_update(self, obj):
- """Update physical-interface.
-
- :param obj: :class:`.PhysicalInterface` object
-
- """
- raise NotImplementedError, 'physical_interface_update is %s\'s responsibility' % (str(type (self)))
- #end physical_interface_update
-
- def physical_interfaces_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all physical-interfaces.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.PhysicalInterface` objects
-
- """
- raise NotImplementedError, 'physical_interfaces_list is %s\'s responsibility' % (str(type (self)))
- #end physical_interfaces_list
-
- def physical_interface_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete physical-interface from the system.
-
- :param fq_name: Fully qualified name of physical-interface
- :param id: UUID of physical-interface
- :param ifmap_id: IFMAP id of physical-interface
-
- """
- raise NotImplementedError, 'physical_interface_delete is %s\'s responsibility' % (str(type (self)))
- #end physical_interface_delete
-
- def get_default_physical_interface_id(self):
- """Return UUID of default physical-interface."""
- raise NotImplementedError, 'get_default_physical_interface_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_physical_interface_id
-
- def access_control_list_create(self, obj):
- """Create new access-control-list.
-
- :param obj: :class:`.AccessControlList` object
-
- """
- raise NotImplementedError, 'access_control_list_create is %s\'s responsibility' % (str(type (self)))
- #end access_control_list_create
-
- def access_control_list_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return access-control-list information.
-
- :param fq_name: Fully qualified name of access-control-list
- :param fq_name_str: Fully qualified name string of access-control-list
- :param id: UUID of access-control-list
- :param ifmap_id: IFMAP id of access-control-list
- :returns: :class:`.AccessControlList` object
-
- """
- raise NotImplementedError, 'access_control_list_read is %s\'s responsibility' % (str(type (self)))
- #end access_control_list_read
-
- def access_control_list_update(self, obj):
- """Update access-control-list.
-
- :param obj: :class:`.AccessControlList` object
-
- """
- raise NotImplementedError, 'access_control_list_update is %s\'s responsibility' % (str(type (self)))
- #end access_control_list_update
-
- def access_control_lists_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all access-control-lists.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.AccessControlList` objects
-
- """
- raise NotImplementedError, 'access_control_lists_list is %s\'s responsibility' % (str(type (self)))
- #end access_control_lists_list
-
- def access_control_list_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete access-control-list from the system.
-
- :param fq_name: Fully qualified name of access-control-list
- :param id: UUID of access-control-list
- :param ifmap_id: IFMAP id of access-control-list
-
- """
- raise NotImplementedError, 'access_control_list_delete is %s\'s responsibility' % (str(type (self)))
- #end access_control_list_delete
-
- def get_default_access_control_list_id(self):
- """Return UUID of default access-control-list."""
- raise NotImplementedError, 'get_default_access_control_list_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_access_control_list_id
-
- def analytics_node_create(self, obj):
- """Create new analytics-node.
-
- :param obj: :class:`.AnalyticsNode` object
-
- """
- raise NotImplementedError, 'analytics_node_create is %s\'s responsibility' % (str(type (self)))
- #end analytics_node_create
-
- def analytics_node_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return analytics-node information.
-
- :param fq_name: Fully qualified name of analytics-node
- :param fq_name_str: Fully qualified name string of analytics-node
- :param id: UUID of analytics-node
- :param ifmap_id: IFMAP id of analytics-node
- :returns: :class:`.AnalyticsNode` object
-
- """
- raise NotImplementedError, 'analytics_node_read is %s\'s responsibility' % (str(type (self)))
- #end analytics_node_read
-
- def analytics_node_update(self, obj):
- """Update analytics-node.
-
- :param obj: :class:`.AnalyticsNode` object
-
- """
- raise NotImplementedError, 'analytics_node_update is %s\'s responsibility' % (str(type (self)))
- #end analytics_node_update
-
- def analytics_nodes_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all analytics-nodes.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.AnalyticsNode` objects
-
- """
- raise NotImplementedError, 'analytics_nodes_list is %s\'s responsibility' % (str(type (self)))
- #end analytics_nodes_list
-
- def analytics_node_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete analytics-node from the system.
-
- :param fq_name: Fully qualified name of analytics-node
- :param id: UUID of analytics-node
- :param ifmap_id: IFMAP id of analytics-node
-
- """
- raise NotImplementedError, 'analytics_node_delete is %s\'s responsibility' % (str(type (self)))
- #end analytics_node_delete
-
- def get_default_analytics_node_id(self):
- """Return UUID of default analytics-node."""
- raise NotImplementedError, 'get_default_analytics_node_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_analytics_node_id
-
- def virtual_DNS_create(self, obj):
- """Create new virtual-DNS.
-
- :param obj: :class:`.VirtualDns` object
-
- """
- raise NotImplementedError, 'virtual_DNS_create is %s\'s responsibility' % (str(type (self)))
- #end virtual_DNS_create
-
- def virtual_DNS_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return virtual-DNS information.
-
- :param fq_name: Fully qualified name of virtual-DNS
- :param fq_name_str: Fully qualified name string of virtual-DNS
- :param id: UUID of virtual-DNS
- :param ifmap_id: IFMAP id of virtual-DNS
- :returns: :class:`.VirtualDns` object
-
- """
- raise NotImplementedError, 'virtual_DNS_read is %s\'s responsibility' % (str(type (self)))
- #end virtual_DNS_read
-
- def virtual_DNS_update(self, obj):
- """Update virtual-DNS.
-
- :param obj: :class:`.VirtualDns` object
-
- """
- raise NotImplementedError, 'virtual_DNS_update is %s\'s responsibility' % (str(type (self)))
- #end virtual_DNS_update
-
- def virtual_DNSs_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all virtual-DNSs.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.VirtualDns` objects
-
- """
- raise NotImplementedError, 'virtual_DNSs_list is %s\'s responsibility' % (str(type (self)))
- #end virtual_DNSs_list
-
- def virtual_DNS_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete virtual-DNS from the system.
-
- :param fq_name: Fully qualified name of virtual-DNS
- :param id: UUID of virtual-DNS
- :param ifmap_id: IFMAP id of virtual-DNS
-
- """
- raise NotImplementedError, 'virtual_DNS_delete is %s\'s responsibility' % (str(type (self)))
- #end virtual_DNS_delete
-
- def get_default_virtual_DNS_id(self):
- """Return UUID of default virtual-DNS."""
- raise NotImplementedError, 'get_default_virtual_DNS_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_virtual_DNS_id
-
- def customer_attachment_create(self, obj):
- """Create new customer-attachment.
-
- :param obj: :class:`.CustomerAttachment` object
-
- """
- raise NotImplementedError, 'customer_attachment_create is %s\'s responsibility' % (str(type (self)))
- #end customer_attachment_create
-
- def customer_attachment_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return customer-attachment information.
-
- :param fq_name: Fully qualified name of customer-attachment
- :param fq_name_str: Fully qualified name string of customer-attachment
- :param id: UUID of customer-attachment
- :param ifmap_id: IFMAP id of customer-attachment
- :returns: :class:`.CustomerAttachment` object
-
- """
- raise NotImplementedError, 'customer_attachment_read is %s\'s responsibility' % (str(type (self)))
- #end customer_attachment_read
-
- def customer_attachment_update(self, obj):
- """Update customer-attachment.
-
- :param obj: :class:`.CustomerAttachment` object
-
- """
- raise NotImplementedError, 'customer_attachment_update is %s\'s responsibility' % (str(type (self)))
- #end customer_attachment_update
-
- def customer_attachments_list(self, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all customer-attachments."""
- raise NotImplementedError, 'customer_attachments_list is %s\'s responsibility' % (str(type (self)))
- #end customer_attachments_list
-
- def customer_attachment_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete customer-attachment from the system.
-
- :param fq_name: Fully qualified name of customer-attachment
- :param id: UUID of customer-attachment
- :param ifmap_id: IFMAP id of customer-attachment
-
- """
- raise NotImplementedError, 'customer_attachment_delete is %s\'s responsibility' % (str(type (self)))
- #end customer_attachment_delete
-
- def get_default_customer_attachment_id(self):
- """Return UUID of default customer-attachment."""
- raise NotImplementedError, 'get_default_customer_attachment_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_customer_attachment_id
-
- def service_appliance_set_create(self, obj):
- """Create new service-appliance-set.
-
- :param obj: :class:`.ServiceApplianceSet` object
-
- """
- raise NotImplementedError, 'service_appliance_set_create is %s\'s responsibility' % (str(type (self)))
- #end service_appliance_set_create
-
- def service_appliance_set_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return service-appliance-set information.
-
- :param fq_name: Fully qualified name of service-appliance-set
- :param fq_name_str: Fully qualified name string of service-appliance-set
- :param id: UUID of service-appliance-set
- :param ifmap_id: IFMAP id of service-appliance-set
- :returns: :class:`.ServiceApplianceSet` object
-
- """
- raise NotImplementedError, 'service_appliance_set_read is %s\'s responsibility' % (str(type (self)))
- #end service_appliance_set_read
-
- def service_appliance_set_update(self, obj):
- """Update service-appliance-set.
-
- :param obj: :class:`.ServiceApplianceSet` object
-
- """
- raise NotImplementedError, 'service_appliance_set_update is %s\'s responsibility' % (str(type (self)))
- #end service_appliance_set_update
-
- def service_appliance_sets_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all service-appliance-sets.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.ServiceApplianceSet` objects
-
- """
- raise NotImplementedError, 'service_appliance_sets_list is %s\'s responsibility' % (str(type (self)))
- #end service_appliance_sets_list
-
- def service_appliance_set_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete service-appliance-set from the system.
-
- :param fq_name: Fully qualified name of service-appliance-set
- :param id: UUID of service-appliance-set
- :param ifmap_id: IFMAP id of service-appliance-set
-
- """
- raise NotImplementedError, 'service_appliance_set_delete is %s\'s responsibility' % (str(type (self)))
- #end service_appliance_set_delete
-
- def get_default_service_appliance_set_id(self):
- """Return UUID of default service-appliance-set."""
- raise NotImplementedError, 'get_default_service_appliance_set_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_service_appliance_set_id
-
- def config_node_create(self, obj):
- """Create new config-node.
-
- :param obj: :class:`.ConfigNode` object
-
- """
- raise NotImplementedError, 'config_node_create is %s\'s responsibility' % (str(type (self)))
- #end config_node_create
-
- def config_node_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return config-node information.
-
- :param fq_name: Fully qualified name of config-node
- :param fq_name_str: Fully qualified name string of config-node
- :param id: UUID of config-node
- :param ifmap_id: IFMAP id of config-node
- :returns: :class:`.ConfigNode` object
-
- """
- raise NotImplementedError, 'config_node_read is %s\'s responsibility' % (str(type (self)))
- #end config_node_read
-
- def config_node_update(self, obj):
- """Update config-node.
-
- :param obj: :class:`.ConfigNode` object
-
- """
- raise NotImplementedError, 'config_node_update is %s\'s responsibility' % (str(type (self)))
- #end config_node_update
-
- def config_nodes_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all config-nodes.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.ConfigNode` objects
-
- """
- raise NotImplementedError, 'config_nodes_list is %s\'s responsibility' % (str(type (self)))
- #end config_nodes_list
-
- def config_node_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete config-node from the system.
-
- :param fq_name: Fully qualified name of config-node
- :param id: UUID of config-node
- :param ifmap_id: IFMAP id of config-node
-
- """
- raise NotImplementedError, 'config_node_delete is %s\'s responsibility' % (str(type (self)))
- #end config_node_delete
-
- def get_default_config_node_id(self):
- """Return UUID of default config-node."""
- raise NotImplementedError, 'get_default_config_node_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_config_node_id
-
- def qos_queue_create(self, obj):
- """Create new qos-queue.
-
- :param obj: :class:`.QosQueue` object
-
- """
- raise NotImplementedError, 'qos_queue_create is %s\'s responsibility' % (str(type (self)))
- #end qos_queue_create
-
- def qos_queue_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return qos-queue information.
-
- :param fq_name: Fully qualified name of qos-queue
- :param fq_name_str: Fully qualified name string of qos-queue
- :param id: UUID of qos-queue
- :param ifmap_id: IFMAP id of qos-queue
- :returns: :class:`.QosQueue` object
-
- """
- raise NotImplementedError, 'qos_queue_read is %s\'s responsibility' % (str(type (self)))
- #end qos_queue_read
-
- def qos_queue_update(self, obj):
- """Update qos-queue.
-
- :param obj: :class:`.QosQueue` object
-
- """
- raise NotImplementedError, 'qos_queue_update is %s\'s responsibility' % (str(type (self)))
- #end qos_queue_update
-
- def qos_queues_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all qos-queues.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.QosQueue` objects
-
- """
- raise NotImplementedError, 'qos_queues_list is %s\'s responsibility' % (str(type (self)))
- #end qos_queues_list
-
- def qos_queue_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete qos-queue from the system.
-
- :param fq_name: Fully qualified name of qos-queue
- :param id: UUID of qos-queue
- :param ifmap_id: IFMAP id of qos-queue
-
- """
- raise NotImplementedError, 'qos_queue_delete is %s\'s responsibility' % (str(type (self)))
- #end qos_queue_delete
-
- def get_default_qos_queue_id(self):
- """Return UUID of default qos-queue."""
- raise NotImplementedError, 'get_default_qos_queue_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_qos_queue_id
-
- def virtual_machine_create(self, obj):
- """Create new virtual-machine.
-
- :param obj: :class:`.VirtualMachine` object
-
- """
- raise NotImplementedError, 'virtual_machine_create is %s\'s responsibility' % (str(type (self)))
- #end virtual_machine_create
-
- def virtual_machine_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return virtual-machine information.
-
- :param fq_name: Fully qualified name of virtual-machine
- :param fq_name_str: Fully qualified name string of virtual-machine
- :param id: UUID of virtual-machine
- :param ifmap_id: IFMAP id of virtual-machine
- :returns: :class:`.VirtualMachine` object
-
- """
- raise NotImplementedError, 'virtual_machine_read is %s\'s responsibility' % (str(type (self)))
- #end virtual_machine_read
-
- def virtual_machine_update(self, obj):
- """Update virtual-machine.
-
- :param obj: :class:`.VirtualMachine` object
-
- """
- raise NotImplementedError, 'virtual_machine_update is %s\'s responsibility' % (str(type (self)))
- #end virtual_machine_update
-
- def virtual_machines_list(self, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all virtual-machines."""
- raise NotImplementedError, 'virtual_machines_list is %s\'s responsibility' % (str(type (self)))
- #end virtual_machines_list
-
- def virtual_machine_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete virtual-machine from the system.
-
- :param fq_name: Fully qualified name of virtual-machine
- :param id: UUID of virtual-machine
- :param ifmap_id: IFMAP id of virtual-machine
-
- """
- raise NotImplementedError, 'virtual_machine_delete is %s\'s responsibility' % (str(type (self)))
- #end virtual_machine_delete
-
- def get_default_virtual_machine_id(self):
- """Return UUID of default virtual-machine."""
- raise NotImplementedError, 'get_default_virtual_machine_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_virtual_machine_id
-
- def interface_route_table_create(self, obj):
- """Create new interface-route-table.
-
- :param obj: :class:`.InterfaceRouteTable` object
-
- """
- raise NotImplementedError, 'interface_route_table_create is %s\'s responsibility' % (str(type (self)))
- #end interface_route_table_create
-
- def interface_route_table_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return interface-route-table information.
-
- :param fq_name: Fully qualified name of interface-route-table
- :param fq_name_str: Fully qualified name string of interface-route-table
- :param id: UUID of interface-route-table
- :param ifmap_id: IFMAP id of interface-route-table
- :returns: :class:`.InterfaceRouteTable` object
-
- """
- raise NotImplementedError, 'interface_route_table_read is %s\'s responsibility' % (str(type (self)))
- #end interface_route_table_read
-
- def interface_route_table_update(self, obj):
- """Update interface-route-table.
-
- :param obj: :class:`.InterfaceRouteTable` object
-
- """
- raise NotImplementedError, 'interface_route_table_update is %s\'s responsibility' % (str(type (self)))
- #end interface_route_table_update
-
- def interface_route_tables_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all interface-route-tables.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.InterfaceRouteTable` objects
-
- """
- raise NotImplementedError, 'interface_route_tables_list is %s\'s responsibility' % (str(type (self)))
- #end interface_route_tables_list
-
- def interface_route_table_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete interface-route-table from the system.
-
- :param fq_name: Fully qualified name of interface-route-table
- :param id: UUID of interface-route-table
- :param ifmap_id: IFMAP id of interface-route-table
-
- """
- raise NotImplementedError, 'interface_route_table_delete is %s\'s responsibility' % (str(type (self)))
- #end interface_route_table_delete
-
- def get_default_interface_route_table_id(self):
- """Return UUID of default interface-route-table."""
- raise NotImplementedError, 'get_default_interface_route_table_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_interface_route_table_id
-
- def service_template_create(self, obj):
- """Create new service-template.
-
- :param obj: :class:`.ServiceTemplate` object
-
- """
- raise NotImplementedError, 'service_template_create is %s\'s responsibility' % (str(type (self)))
- #end service_template_create
-
- def service_template_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return service-template information.
-
- :param fq_name: Fully qualified name of service-template
- :param fq_name_str: Fully qualified name string of service-template
- :param id: UUID of service-template
- :param ifmap_id: IFMAP id of service-template
- :returns: :class:`.ServiceTemplate` object
-
- """
- raise NotImplementedError, 'service_template_read is %s\'s responsibility' % (str(type (self)))
- #end service_template_read
-
- def service_template_update(self, obj):
- """Update service-template.
-
- :param obj: :class:`.ServiceTemplate` object
-
- """
- raise NotImplementedError, 'service_template_update is %s\'s responsibility' % (str(type (self)))
- #end service_template_update
-
- def service_templates_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all service-templates.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.ServiceTemplate` objects
-
- """
- raise NotImplementedError, 'service_templates_list is %s\'s responsibility' % (str(type (self)))
- #end service_templates_list
-
- def service_template_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete service-template from the system.
-
- :param fq_name: Fully qualified name of service-template
- :param id: UUID of service-template
- :param ifmap_id: IFMAP id of service-template
-
- """
- raise NotImplementedError, 'service_template_delete is %s\'s responsibility' % (str(type (self)))
- #end service_template_delete
-
- def get_default_service_template_id(self):
- """Return UUID of default service-template."""
- raise NotImplementedError, 'get_default_service_template_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_service_template_id
-
- def virtual_ip_create(self, obj):
- """Create new virtual-ip.
-
- :param obj: :class:`.VirtualIp` object
-
- """
- raise NotImplementedError, 'virtual_ip_create is %s\'s responsibility' % (str(type (self)))
- #end virtual_ip_create
-
- def virtual_ip_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return virtual-ip information.
-
- :param fq_name: Fully qualified name of virtual-ip
- :param fq_name_str: Fully qualified name string of virtual-ip
- :param id: UUID of virtual-ip
- :param ifmap_id: IFMAP id of virtual-ip
- :returns: :class:`.VirtualIp` object
-
- """
- raise NotImplementedError, 'virtual_ip_read is %s\'s responsibility' % (str(type (self)))
- #end virtual_ip_read
-
- def virtual_ip_update(self, obj):
- """Update virtual-ip.
-
- :param obj: :class:`.VirtualIp` object
-
- """
- raise NotImplementedError, 'virtual_ip_update is %s\'s responsibility' % (str(type (self)))
- #end virtual_ip_update
-
- def virtual_ips_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all virtual-ips.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.VirtualIp` objects
-
- """
- raise NotImplementedError, 'virtual_ips_list is %s\'s responsibility' % (str(type (self)))
- #end virtual_ips_list
-
- def virtual_ip_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete virtual-ip from the system.
-
- :param fq_name: Fully qualified name of virtual-ip
- :param id: UUID of virtual-ip
- :param ifmap_id: IFMAP id of virtual-ip
-
- """
- raise NotImplementedError, 'virtual_ip_delete is %s\'s responsibility' % (str(type (self)))
- #end virtual_ip_delete
-
- def get_default_virtual_ip_id(self):
- """Return UUID of default virtual-ip."""
- raise NotImplementedError, 'get_default_virtual_ip_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_virtual_ip_id
-
- def loadbalancer_member_create(self, obj):
- """Create new loadbalancer-member.
-
- :param obj: :class:`.LoadbalancerMember` object
-
- """
- raise NotImplementedError, 'loadbalancer_member_create is %s\'s responsibility' % (str(type (self)))
- #end loadbalancer_member_create
-
- def loadbalancer_member_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return loadbalancer-member information.
-
- :param fq_name: Fully qualified name of loadbalancer-member
- :param fq_name_str: Fully qualified name string of loadbalancer-member
- :param id: UUID of loadbalancer-member
- :param ifmap_id: IFMAP id of loadbalancer-member
- :returns: :class:`.LoadbalancerMember` object
-
- """
- raise NotImplementedError, 'loadbalancer_member_read is %s\'s responsibility' % (str(type (self)))
- #end loadbalancer_member_read
-
- def loadbalancer_member_update(self, obj):
- """Update loadbalancer-member.
-
- :param obj: :class:`.LoadbalancerMember` object
-
- """
- raise NotImplementedError, 'loadbalancer_member_update is %s\'s responsibility' % (str(type (self)))
- #end loadbalancer_member_update
-
- def loadbalancer_members_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all loadbalancer-members.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.LoadbalancerMember` objects
-
- """
- raise NotImplementedError, 'loadbalancer_members_list is %s\'s responsibility' % (str(type (self)))
- #end loadbalancer_members_list
-
- def loadbalancer_member_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete loadbalancer-member from the system.
-
- :param fq_name: Fully qualified name of loadbalancer-member
- :param id: UUID of loadbalancer-member
- :param ifmap_id: IFMAP id of loadbalancer-member
-
- """
- raise NotImplementedError, 'loadbalancer_member_delete is %s\'s responsibility' % (str(type (self)))
- #end loadbalancer_member_delete
-
- def get_default_loadbalancer_member_id(self):
- """Return UUID of default loadbalancer-member."""
- raise NotImplementedError, 'get_default_loadbalancer_member_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_loadbalancer_member_id
-
- def security_group_create(self, obj):
- """Create new security-group.
-
- :param obj: :class:`.SecurityGroup` object
-
- """
- raise NotImplementedError, 'security_group_create is %s\'s responsibility' % (str(type (self)))
- #end security_group_create
-
- def security_group_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return security-group information.
-
- :param fq_name: Fully qualified name of security-group
- :param fq_name_str: Fully qualified name string of security-group
- :param id: UUID of security-group
- :param ifmap_id: IFMAP id of security-group
- :returns: :class:`.SecurityGroup` object
-
- """
- raise NotImplementedError, 'security_group_read is %s\'s responsibility' % (str(type (self)))
- #end security_group_read
-
- def security_group_update(self, obj):
- """Update security-group.
-
- :param obj: :class:`.SecurityGroup` object
-
- """
- raise NotImplementedError, 'security_group_update is %s\'s responsibility' % (str(type (self)))
- #end security_group_update
-
- def security_groups_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all security-groups.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.SecurityGroup` objects
-
- """
- raise NotImplementedError, 'security_groups_list is %s\'s responsibility' % (str(type (self)))
- #end security_groups_list
-
- def security_group_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete security-group from the system.
-
- :param fq_name: Fully qualified name of security-group
- :param id: UUID of security-group
- :param ifmap_id: IFMAP id of security-group
-
- """
- raise NotImplementedError, 'security_group_delete is %s\'s responsibility' % (str(type (self)))
- #end security_group_delete
-
- def get_default_security_group_id(self):
- """Return UUID of default security-group."""
- raise NotImplementedError, 'get_default_security_group_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_security_group_id
-
- def provider_attachment_create(self, obj):
- """Create new provider-attachment.
-
- :param obj: :class:`.ProviderAttachment` object
-
- """
- raise NotImplementedError, 'provider_attachment_create is %s\'s responsibility' % (str(type (self)))
- #end provider_attachment_create
-
- def provider_attachment_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return provider-attachment information.
-
- :param fq_name: Fully qualified name of provider-attachment
- :param fq_name_str: Fully qualified name string of provider-attachment
- :param id: UUID of provider-attachment
- :param ifmap_id: IFMAP id of provider-attachment
- :returns: :class:`.ProviderAttachment` object
-
- """
- raise NotImplementedError, 'provider_attachment_read is %s\'s responsibility' % (str(type (self)))
- #end provider_attachment_read
-
- def provider_attachment_update(self, obj):
- """Update provider-attachment.
-
- :param obj: :class:`.ProviderAttachment` object
-
- """
- raise NotImplementedError, 'provider_attachment_update is %s\'s responsibility' % (str(type (self)))
- #end provider_attachment_update
-
- def provider_attachments_list(self, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all provider-attachments."""
- raise NotImplementedError, 'provider_attachments_list is %s\'s responsibility' % (str(type (self)))
- #end provider_attachments_list
-
- def provider_attachment_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete provider-attachment from the system.
-
- :param fq_name: Fully qualified name of provider-attachment
- :param id: UUID of provider-attachment
- :param ifmap_id: IFMAP id of provider-attachment
-
- """
- raise NotImplementedError, 'provider_attachment_delete is %s\'s responsibility' % (str(type (self)))
- #end provider_attachment_delete
-
- def get_default_provider_attachment_id(self):
- """Return UUID of default provider-attachment."""
- raise NotImplementedError, 'get_default_provider_attachment_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_provider_attachment_id
-
- def virtual_machine_interface_create(self, obj):
- """Create new virtual-machine-interface.
-
- :param obj: :class:`.VirtualMachineInterface` object
-
- """
- raise NotImplementedError, 'virtual_machine_interface_create is %s\'s responsibility' % (str(type (self)))
- #end virtual_machine_interface_create
-
- def virtual_machine_interface_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return virtual-machine-interface information.
-
- :param fq_name: Fully qualified name of virtual-machine-interface
- :param fq_name_str: Fully qualified name string of virtual-machine-interface
- :param id: UUID of virtual-machine-interface
- :param ifmap_id: IFMAP id of virtual-machine-interface
- :returns: :class:`.VirtualMachineInterface` object
-
- """
- raise NotImplementedError, 'virtual_machine_interface_read is %s\'s responsibility' % (str(type (self)))
- #end virtual_machine_interface_read
-
- def virtual_machine_interface_update(self, obj):
- """Update virtual-machine-interface.
-
- :param obj: :class:`.VirtualMachineInterface` object
-
- """
- raise NotImplementedError, 'virtual_machine_interface_update is %s\'s responsibility' % (str(type (self)))
- #end virtual_machine_interface_update
-
- def virtual_machine_interfaces_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all virtual-machine-interfaces.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.VirtualMachineInterface` objects
-
- """
- raise NotImplementedError, 'virtual_machine_interfaces_list is %s\'s responsibility' % (str(type (self)))
- #end virtual_machine_interfaces_list
-
- def virtual_machine_interface_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete virtual-machine-interface from the system.
-
- :param fq_name: Fully qualified name of virtual-machine-interface
- :param id: UUID of virtual-machine-interface
- :param ifmap_id: IFMAP id of virtual-machine-interface
-
- """
- raise NotImplementedError, 'virtual_machine_interface_delete is %s\'s responsibility' % (str(type (self)))
- #end virtual_machine_interface_delete
-
- def get_default_virtual_machine_interface_id(self):
- """Return UUID of default virtual-machine-interface."""
- raise NotImplementedError, 'get_default_virtual_machine_interface_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_virtual_machine_interface_id
-
- def loadbalancer_healthmonitor_create(self, obj):
- """Create new loadbalancer-healthmonitor.
-
- :param obj: :class:`.LoadbalancerHealthmonitor` object
-
- """
- raise NotImplementedError, 'loadbalancer_healthmonitor_create is %s\'s responsibility' % (str(type (self)))
- #end loadbalancer_healthmonitor_create
-
- def loadbalancer_healthmonitor_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return loadbalancer-healthmonitor information.
-
- :param fq_name: Fully qualified name of loadbalancer-healthmonitor
- :param fq_name_str: Fully qualified name string of loadbalancer-healthmonitor
- :param id: UUID of loadbalancer-healthmonitor
- :param ifmap_id: IFMAP id of loadbalancer-healthmonitor
- :returns: :class:`.LoadbalancerHealthmonitor` object
-
- """
- raise NotImplementedError, 'loadbalancer_healthmonitor_read is %s\'s responsibility' % (str(type (self)))
- #end loadbalancer_healthmonitor_read
-
- def loadbalancer_healthmonitor_update(self, obj):
- """Update loadbalancer-healthmonitor.
-
- :param obj: :class:`.LoadbalancerHealthmonitor` object
-
- """
- raise NotImplementedError, 'loadbalancer_healthmonitor_update is %s\'s responsibility' % (str(type (self)))
- #end loadbalancer_healthmonitor_update
-
- def loadbalancer_healthmonitors_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all loadbalancer-healthmonitors.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.LoadbalancerHealthmonitor` objects
-
- """
- raise NotImplementedError, 'loadbalancer_healthmonitors_list is %s\'s responsibility' % (str(type (self)))
- #end loadbalancer_healthmonitors_list
-
- def loadbalancer_healthmonitor_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete loadbalancer-healthmonitor from the system.
-
- :param fq_name: Fully qualified name of loadbalancer-healthmonitor
- :param id: UUID of loadbalancer-healthmonitor
- :param ifmap_id: IFMAP id of loadbalancer-healthmonitor
-
- """
- raise NotImplementedError, 'loadbalancer_healthmonitor_delete is %s\'s responsibility' % (str(type (self)))
- #end loadbalancer_healthmonitor_delete
-
- def get_default_loadbalancer_healthmonitor_id(self):
- """Return UUID of default loadbalancer-healthmonitor."""
- raise NotImplementedError, 'get_default_loadbalancer_healthmonitor_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_loadbalancer_healthmonitor_id
-
- def virtual_network_create(self, obj):
- """Create new virtual-network.
-
- :param obj: :class:`.VirtualNetwork` object
-
- """
- raise NotImplementedError, 'virtual_network_create is %s\'s responsibility' % (str(type (self)))
- #end virtual_network_create
-
- def virtual_network_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return virtual-network information.
-
- :param fq_name: Fully qualified name of virtual-network
- :param fq_name_str: Fully qualified name string of virtual-network
- :param id: UUID of virtual-network
- :param ifmap_id: IFMAP id of virtual-network
- :returns: :class:`.VirtualNetwork` object
-
- """
- raise NotImplementedError, 'virtual_network_read is %s\'s responsibility' % (str(type (self)))
- #end virtual_network_read
-
- def virtual_network_update(self, obj):
- """Update virtual-network.
-
- :param obj: :class:`.VirtualNetwork` object
-
- """
- raise NotImplementedError, 'virtual_network_update is %s\'s responsibility' % (str(type (self)))
- #end virtual_network_update
-
- def virtual_networks_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all virtual-networks.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.VirtualNetwork` objects
-
- """
- raise NotImplementedError, 'virtual_networks_list is %s\'s responsibility' % (str(type (self)))
- #end virtual_networks_list
-
- def virtual_network_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete virtual-network from the system.
-
- :param fq_name: Fully qualified name of virtual-network
- :param id: UUID of virtual-network
- :param ifmap_id: IFMAP id of virtual-network
-
- """
- raise NotImplementedError, 'virtual_network_delete is %s\'s responsibility' % (str(type (self)))
- #end virtual_network_delete
-
- def get_default_virtual_network_id(self):
- """Return UUID of default virtual-network."""
- raise NotImplementedError, 'get_default_virtual_network_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_virtual_network_id
-
- def project_create(self, obj):
- """Create new project.
-
- :param obj: :class:`.Project` object
-
- """
- raise NotImplementedError, 'project_create is %s\'s responsibility' % (str(type (self)))
- #end project_create
-
- def project_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return project information.
-
- :param fq_name: Fully qualified name of project
- :param fq_name_str: Fully qualified name string of project
- :param id: UUID of project
- :param ifmap_id: IFMAP id of project
- :returns: :class:`.Project` object
-
- """
- raise NotImplementedError, 'project_read is %s\'s responsibility' % (str(type (self)))
- #end project_read
-
- def project_update(self, obj):
- """Update project.
-
- :param obj: :class:`.Project` object
-
- """
- raise NotImplementedError, 'project_update is %s\'s responsibility' % (str(type (self)))
- #end project_update
-
- def projects_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all projects.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.Project` objects
-
- """
- raise NotImplementedError, 'projects_list is %s\'s responsibility' % (str(type (self)))
- #end projects_list
-
- def project_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete project from the system.
-
- :param fq_name: Fully qualified name of project
- :param id: UUID of project
- :param ifmap_id: IFMAP id of project
-
- """
- raise NotImplementedError, 'project_delete is %s\'s responsibility' % (str(type (self)))
- #end project_delete
-
- def get_default_project_id(self):
- """Return UUID of default project."""
- raise NotImplementedError, 'get_default_project_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_project_id
-
- def qos_forwarding_class_create(self, obj):
- """Create new qos-forwarding-class.
-
- :param obj: :class:`.QosForwardingClass` object
-
- """
- raise NotImplementedError, 'qos_forwarding_class_create is %s\'s responsibility' % (str(type (self)))
- #end qos_forwarding_class_create
-
- def qos_forwarding_class_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return qos-forwarding-class information.
-
- :param fq_name: Fully qualified name of qos-forwarding-class
- :param fq_name_str: Fully qualified name string of qos-forwarding-class
- :param id: UUID of qos-forwarding-class
- :param ifmap_id: IFMAP id of qos-forwarding-class
- :returns: :class:`.QosForwardingClass` object
-
- """
- raise NotImplementedError, 'qos_forwarding_class_read is %s\'s responsibility' % (str(type (self)))
- #end qos_forwarding_class_read
-
- def qos_forwarding_class_update(self, obj):
- """Update qos-forwarding-class.
-
- :param obj: :class:`.QosForwardingClass` object
-
- """
- raise NotImplementedError, 'qos_forwarding_class_update is %s\'s responsibility' % (str(type (self)))
- #end qos_forwarding_class_update
-
- def qos_forwarding_classs_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all qos-forwarding-classes.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.QosForwardingClass` objects
-
- """
- raise NotImplementedError, 'qos_forwarding_classs_list is %s\'s responsibility' % (str(type (self)))
- #end qos_forwarding_classs_list
-
- def qos_forwarding_class_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete qos-forwarding-class from the system.
-
- :param fq_name: Fully qualified name of qos-forwarding-class
- :param id: UUID of qos-forwarding-class
- :param ifmap_id: IFMAP id of qos-forwarding-class
-
- """
- raise NotImplementedError, 'qos_forwarding_class_delete is %s\'s responsibility' % (str(type (self)))
- #end qos_forwarding_class_delete
-
- def get_default_qos_forwarding_class_id(self):
- """Return UUID of default qos-forwarding-class."""
- raise NotImplementedError, 'get_default_qos_forwarding_class_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_qos_forwarding_class_id
-
- def database_node_create(self, obj):
- """Create new database-node.
-
- :param obj: :class:`.DatabaseNode` object
-
- """
- raise NotImplementedError, 'database_node_create is %s\'s responsibility' % (str(type (self)))
- #end database_node_create
-
- def database_node_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return database-node information.
-
- :param fq_name: Fully qualified name of database-node
- :param fq_name_str: Fully qualified name string of database-node
- :param id: UUID of database-node
- :param ifmap_id: IFMAP id of database-node
- :returns: :class:`.DatabaseNode` object
-
- """
- raise NotImplementedError, 'database_node_read is %s\'s responsibility' % (str(type (self)))
- #end database_node_read
-
- def database_node_update(self, obj):
- """Update database-node.
-
- :param obj: :class:`.DatabaseNode` object
-
- """
- raise NotImplementedError, 'database_node_update is %s\'s responsibility' % (str(type (self)))
- #end database_node_update
-
- def database_nodes_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all database-nodes.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.DatabaseNode` objects
-
- """
- raise NotImplementedError, 'database_nodes_list is %s\'s responsibility' % (str(type (self)))
- #end database_nodes_list
-
- def database_node_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete database-node from the system.
-
- :param fq_name: Fully qualified name of database-node
- :param id: UUID of database-node
- :param ifmap_id: IFMAP id of database-node
-
- """
- raise NotImplementedError, 'database_node_delete is %s\'s responsibility' % (str(type (self)))
- #end database_node_delete
-
- def get_default_database_node_id(self):
- """Return UUID of default database-node."""
- raise NotImplementedError, 'get_default_database_node_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_database_node_id
-
- def routing_instance_create(self, obj):
- """Create new routing-instance.
-
- :param obj: :class:`.RoutingInstance` object
-
- """
- raise NotImplementedError, 'routing_instance_create is %s\'s responsibility' % (str(type (self)))
- #end routing_instance_create
-
- def routing_instance_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return routing-instance information.
-
- :param fq_name: Fully qualified name of routing-instance
- :param fq_name_str: Fully qualified name string of routing-instance
- :param id: UUID of routing-instance
- :param ifmap_id: IFMAP id of routing-instance
- :returns: :class:`.RoutingInstance` object
-
- """
- raise NotImplementedError, 'routing_instance_read is %s\'s responsibility' % (str(type (self)))
- #end routing_instance_read
-
- def routing_instance_update(self, obj):
- """Update routing-instance.
-
- :param obj: :class:`.RoutingInstance` object
-
- """
- raise NotImplementedError, 'routing_instance_update is %s\'s responsibility' % (str(type (self)))
- #end routing_instance_update
-
- def routing_instances_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all routing-instances.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.RoutingInstance` objects
-
- """
- raise NotImplementedError, 'routing_instances_list is %s\'s responsibility' % (str(type (self)))
- #end routing_instances_list
-
- def routing_instance_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete routing-instance from the system.
-
- :param fq_name: Fully qualified name of routing-instance
- :param id: UUID of routing-instance
- :param ifmap_id: IFMAP id of routing-instance
-
- """
- raise NotImplementedError, 'routing_instance_delete is %s\'s responsibility' % (str(type (self)))
- #end routing_instance_delete
-
- def get_default_routing_instance_id(self):
- """Return UUID of default routing-instance."""
- raise NotImplementedError, 'get_default_routing_instance_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_routing_instance_id
-
- def network_ipam_create(self, obj):
- """Create new network-ipam.
-
- :param obj: :class:`.NetworkIpam` object
-
- """
- raise NotImplementedError, 'network_ipam_create is %s\'s responsibility' % (str(type (self)))
- #end network_ipam_create
-
- def network_ipam_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return network-ipam information.
-
- :param fq_name: Fully qualified name of network-ipam
- :param fq_name_str: Fully qualified name string of network-ipam
- :param id: UUID of network-ipam
- :param ifmap_id: IFMAP id of network-ipam
- :returns: :class:`.NetworkIpam` object
-
- """
- raise NotImplementedError, 'network_ipam_read is %s\'s responsibility' % (str(type (self)))
- #end network_ipam_read
-
- def network_ipam_update(self, obj):
- """Update network-ipam.
-
- :param obj: :class:`.NetworkIpam` object
-
- """
- raise NotImplementedError, 'network_ipam_update is %s\'s responsibility' % (str(type (self)))
- #end network_ipam_update
-
- def network_ipams_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all network-ipams.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.NetworkIpam` objects
-
- """
- raise NotImplementedError, 'network_ipams_list is %s\'s responsibility' % (str(type (self)))
- #end network_ipams_list
-
- def network_ipam_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete network-ipam from the system.
-
- :param fq_name: Fully qualified name of network-ipam
- :param id: UUID of network-ipam
- :param ifmap_id: IFMAP id of network-ipam
-
- """
- raise NotImplementedError, 'network_ipam_delete is %s\'s responsibility' % (str(type (self)))
- #end network_ipam_delete
-
- def get_default_network_ipam_id(self):
- """Return UUID of default network-ipam."""
- raise NotImplementedError, 'get_default_network_ipam_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_network_ipam_id
-
- def logical_router_create(self, obj):
- """Create new logical-router.
-
- :param obj: :class:`.LogicalRouter` object
-
- """
- raise NotImplementedError, 'logical_router_create is %s\'s responsibility' % (str(type (self)))
- #end logical_router_create
-
- def logical_router_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None):
- """Return logical-router information.
-
- :param fq_name: Fully qualified name of logical-router
- :param fq_name_str: Fully qualified name string of logical-router
- :param id: UUID of logical-router
- :param ifmap_id: IFMAP id of logical-router
- :returns: :class:`.LogicalRouter` object
-
- """
- raise NotImplementedError, 'logical_router_read is %s\'s responsibility' % (str(type (self)))
- #end logical_router_read
-
- def logical_router_update(self, obj):
- """Update logical-router.
-
- :param obj: :class:`.LogicalRouter` object
-
- """
- raise NotImplementedError, 'logical_router_update is %s\'s responsibility' % (str(type (self)))
- #end logical_router_update
-
- def logical_routers_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False):
- """List all logical-routers.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.LogicalRouter` objects
-
- """
- raise NotImplementedError, 'logical_routers_list is %s\'s responsibility' % (str(type (self)))
- #end logical_routers_list
-
- def logical_router_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete logical-router from the system.
-
- :param fq_name: Fully qualified name of logical-router
- :param id: UUID of logical-router
- :param ifmap_id: IFMAP id of logical-router
-
- """
- raise NotImplementedError, 'logical_router_delete is %s\'s responsibility' % (str(type (self)))
- #end logical_router_delete
-
- def get_default_logical_router_id(self):
- """Return UUID of default logical-router."""
- raise NotImplementedError, 'get_default_logical_router_id is %s\'s responsibility' % (str(type (self)))
- #end get_default_logical_router_id
-
-#end class ConnectionDriverBase
-
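The ConnectionDriverBase class removed above only declares the CRUD surface: every method raises NotImplementedError and is meant to be overridden by a concrete connection driver. A minimal sketch of such an override, assuming a hypothetical REST-backed subclass (the ExampleRestDriver name, the session object, and the URL/payload layout are illustrative assumptions, not part of the removed code):

    import json

    class ExampleRestDriver(ConnectionDriverBase):  # hypothetical driver, for illustration only
        def __init__(self, session, base_url):
            self._session = session    # assumed to behave like a requests.Session
            self._base_url = base_url

        def virtual_network_create(self, obj):
            # Serialize the VirtualNetwork object and POST it; the payload layout
            # shown here is made up and is not the real vnc_api wire format.
            payload = {'virtual-network': {'fq_name': obj.get_fq_name()}}
            resp = self._session.post(self._base_url + '/virtual-networks',
                                      data=json.dumps(payload))
            return resp.json()['virtual-network']['uuid']

        def virtual_network_read(self, fq_name=None, fq_name_str=None, id=None, ifmap_id=None):
            # Only the lookup-by-UUID path is sketched; fq_name/ifmap_id handling is omitted.
            resp = self._session.get('%s/virtual-network/%s' % (self._base_url, id))
            return resp.json()

A real driver would also have to cover the fq_name and ifmap_id lookup paths, error translation, and the remaining resource types, which this sketch leaves out.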
diff --git a/Testcases/vnc_api/gen/connection_drv_gen.pyc b/Testcases/vnc_api/gen/connection_drv_gen.pyc
deleted file mode 100644
index 53c7841..0000000
--- a/Testcases/vnc_api/gen/connection_drv_gen.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/gen/generatedssuper.py b/Testcases/vnc_api/gen/generatedssuper.py
deleted file mode 100644
index 34efb7f..0000000
--- a/Testcases/vnc_api/gen/generatedssuper.py
+++ /dev/null
@@ -1,226 +0,0 @@
-#
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-import re
-import sys
-
-ExternalEncoding = sys.getdefaultencoding()
-Tag_pattern_ = re.compile(r'({.*})?(.*)')
-
-
-def showIndent(outfile, level, pretty_print=False):
- for i in range(level - 1):
- outfile.write(" ")
-
-
-def quote_xml(inStr):
- if not inStr:
- return ''
- s1 = (isinstance(inStr, basestring) and inStr or
- '%s' % inStr)
- s1 = s1.replace('&', '&amp;')
- s1 = s1.replace('<', '&lt;')
- s1 = s1.replace('>', '&gt;')
- return s1
-
-
-def quote_attrib(inStr):
- s1 = (isinstance(inStr, basestring) and inStr or
- '%s' % inStr)
- s1 = s1.replace('&', '&amp;')
- s1 = s1.replace('<', '&lt;')
- s1 = s1.replace('>', '&gt;')
- if '"' in s1:
- if "'" in s1:
- s1 = '"%s"' % s1.replace('"', "&quot;")
- else:
- s1 = "'%s'" % s1
- else:
- s1 = '"%s"' % s1
- return s1
-
-
-def quote_python(inStr):
- s1 = inStr
- if s1.find("'") == -1:
- if s1.find('\\n') == -1:
- return "'%s'" % s1
- else:
- return "'''%s'''" % s1
- else:
- if s1.find('"') != -1:
- s1 = s1.replace('"', '\\\\"')
- if s1.find('\\n') == -1:
- return '"%s"' % s1
- else:
- return '\"\"\"%s\"\"\"' % s1
-
-
-class GeneratedsSuper(object):
-
- def gds_format_string(self, input_data, input_name=''):
- return input_data
-
- def gds_validate_string(self, input_data, node, input_name=''):
- if input_data is None:
- return ""
- return input_data
-
- def gds_format_integer(self, input_data, input_name=''):
- return '%d' % input_data
-
- def gds_validate_integer(self, input_data, node, input_name=''):
- return input_data
-
- def gds_format_integer_list(self, input_data, input_name=''):
- return '%s' % input_data
-
- def gds_validate_integer_list(self, input_data, node, input_name=''):
- values = input_data.split()
- for value in values:
- try:
- fvalue = float(value)
- except (TypeError, ValueError), exp:
- raise_parse_error(node, 'Requires sequence of integers')
- return input_data
-
- def gds_format_float(self, input_data, input_name=''):
- return '%f' % input_data
-
- def gds_validate_float(self, input_data, node, input_name=''):
- return input_data
-
- def gds_format_float_list(self, input_data, input_name=''):
- return '%s' % input_data
-
- def gds_validate_float_list(self, input_data, node, input_name=''):
- values = input_data.split()
- for value in values:
- try:
- fvalue = float(value)
- except (TypeError, ValueError), exp:
- raise_parse_error(node, 'Requires sequence of floats')
- return input_data
-
- def gds_format_double(self, input_data, input_name=''):
- return '%e' % input_data
-
- def gds_validate_double(self, input_data, node, input_name=''):
- return input_data
-
- def gds_format_double_list(self, input_data, input_name=''):
- return '%s' % input_data
-
- def gds_validate_double_list(self, input_data, node, input_name=''):
- values = input_data.split()
- for value in values:
- try:
- fvalue = float(value)
- except (TypeError, ValueError), exp:
- raise_parse_error(node, 'Requires sequence of doubles')
- return input_data
-
- def gds_format_boolean(self, input_data, input_name=''):
- return '%s' % input_data
-
- def gds_validate_boolean(self, input_data, node, input_name=''):
- return input_data
-
- def gds_format_boolean_list(self, input_data, input_name=''):
- return '%s' % input_data
-
- def gds_validate_boolean_list(self, input_data, node, input_name=''):
- values = input_data.split()
- for value in values:
- if value not in ('true', '1', 'false', '0', ):
- raise_parse_error(
- node,
- 'Requires sequence of booleans'
- ' ("true", "1", "false", "0")')
- return input_data
-
- def gds_str_lower(self, instring):
- return instring.lower()
-
- def get_path_(self, node):
- path_list = []
- self.get_path_list_(node, path_list)
- path_list.reverse()
- path = '/'.join(path_list)
- return path
- Tag_strip_pattern_ = re.compile(r'\{.*\}')
-
- def get_path_list_(self, node, path_list):
- if node is None:
- return
- tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
- if tag:
- path_list.append(tag)
- self.get_path_list_(node.getparent(), path_list)
-
- def get_class_obj_(self, node, default_class=None):
- class_obj1 = default_class
- if 'xsi' in node.nsmap:
- classname = node.get('{%s}type' % node.nsmap['xsi'])
- if classname is not None:
- names = classname.split(':')
- if len(names) == 2:
- classname = names[1]
- class_obj2 = globals().get(classname)
- if class_obj2 is not None:
- class_obj1 = class_obj2
- return class_obj1
-
- def gds_build_any(self, node, type_name=None):
- return None
-
- @staticmethod
- def populate_string(name):
- if "mac_address" in name:
- return '00:ca:fe:00:ba:be'
- elif "prefix" in name:
- return '10.5.6.0'
- elif "_network" in name or 'subnet' in name:
- return '10.5.6.0/24'
- elif ("address" in name or 'gateway' in name or
- "router" in name):
- return '10.5.6.253'
- elif "uuid" in name:
- return '0797d558-1d98-479e-a941-a05ae88dc159'
- elif "protocol" in name:
- return 'udp'
- elif "route_target" in name:
- return '192.168.1.42/32'
- elif "creator" in name:
- return 'test'
- else:
- return 'test-' + name
-
- @staticmethod
- def populate_unsignedLong(name):
- return 42
-
- @staticmethod
- def populate_unsignedInt(name):
- return 42
-
- @staticmethod
- def populate_integer(name):
- if "prefix" in name:
- return 24
- if name.endswith('_access'):
- return 7
- else:
- return 42
-
- @staticmethod
- def populate_dateTime(name):
- return "2002-05-30T09:30:10.5"
-
- @staticmethod
- def populate_time(name):
- return "09:30:10Z"
-
- @staticmethod
- def populate_boolean(name):
- return False
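The generatedssuper.py module removed above mostly supplies XML string-quoting helpers and the populate_* helpers that the generated test objects use to fabricate plausible field values. A short usage sketch, assuming the module were still importable under this name; the values in the comments follow directly from the code above:

    from generatedssuper import GeneratedsSuper, quote_attrib, quote_xml

    print(quote_xml('a < b & c'))                            # a &lt; b &amp; c
    print(quote_attrib('say "hi"'))                          # wrapped in single quotes because of the embedded "
    print(GeneratedsSuper.populate_string('mac_address'))    # 00:ca:fe:00:ba:be
    print(GeneratedsSuper.populate_string('subnet'))         # 10.5.6.0/24
    print(GeneratedsSuper.populate_integer('prefix_len'))    # 24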
diff --git a/Testcases/vnc_api/gen/generatedssuper.pyc b/Testcases/vnc_api/gen/generatedssuper.pyc
deleted file mode 100644
index b884622..0000000
--- a/Testcases/vnc_api/gen/generatedssuper.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/gen/resource_client.py b/Testcases/vnc_api/gen/resource_client.py
deleted file mode 100644
index 89310ea..0000000
--- a/Testcases/vnc_api/gen/resource_client.py
+++ /dev/null
@@ -1,10026 +0,0 @@
-
-# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit!
-
-import vnc_api.gen.resource_common
-import vnc_api.gen.resource_xsd
-
-
-class Domain(vnc_api.gen.resource_common.Domain):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, domain_limits = None, api_access_list = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if domain_limits:
- pending_fields.append('domain_limits')
- if api_access_list:
- pending_fields.append('api_access_list')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(Domain, self).__init__(name, parent_obj, domain_limits, api_access_list, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'domain_limits' in kwargs:
- props_dict['domain_limits'] = vnc_api.gen.resource_xsd.DomainLimitsType(**kwargs['domain_limits'])
- if 'api_access_list' in kwargs:
- props_dict['api_access_list'] = vnc_api.gen.resource_xsd.ApiAccessListType(**kwargs['api_access_list'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = Domain(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
- if 'projects' in kwargs:
- obj.projects = kwargs['projects']
- if 'namespaces' in kwargs:
- obj.namespaces = kwargs['namespaces']
- if 'service_templates' in kwargs:
- obj.service_templates = kwargs['service_templates']
- if 'virtual_DNSs' in kwargs:
- obj.virtual_DNSs = kwargs['virtual_DNSs']
-
- # add any specified references...
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.Domain.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.Domain.domain_limits.setter
- def domain_limits(self, domain_limits):
- """Set domain-limits for domain.
-
- :param domain_limits: DomainLimitsType object
-
- """
- if 'domain_limits' not in self._pending_field_updates:
- self._pending_field_updates.add('domain_limits')
-
- self._domain_limits = domain_limits
- #end domain_limits
-
- def set_domain_limits(self, value):
- self.domain_limits = value
- #end set_domain_limits
-
- @vnc_api.gen.resource_common.Domain.api_access_list.setter
- def api_access_list(self, api_access_list):
- """Set api-access-list for domain.
-
- :param api_access_list: ApiAccessListType object
-
- """
- if 'api_access_list' not in self._pending_field_updates:
- self._pending_field_updates.add('api_access_list')
-
- self._api_access_list = api_access_list
- #end api_access_list
-
- def set_api_access_list(self, value):
- self.api_access_list = value
- #end set_api_access_list
-
- @vnc_api.gen.resource_common.Domain.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for domain.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.Domain.display_name.setter
- def display_name(self, display_name):
- """Set display-name for domain.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_projects(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Domain, self).get_projects()
- if not children: # read it for first time
- obj = svr_conn.domain_read(id = self.uuid, fields = ['projects'])
- children = getattr(obj, 'projects', None)
- self.projects = children
-
- return children
- #end get_projects
-
- def get_namespaces(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Domain, self).get_namespaces()
- if not children: # read it for first time
- obj = svr_conn.domain_read(id = self.uuid, fields = ['namespaces'])
- children = getattr(obj, 'namespaces', None)
- self.namespaces = children
-
- return children
- #end get_namespaces
-
- def get_service_templates(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Domain, self).get_service_templates()
- if not children: # read it for first time
- obj = svr_conn.domain_read(id = self.uuid, fields = ['service_templates'])
- children = getattr(obj, 'service_templates', None)
- self.service_templates = children
-
- return children
- #end get_service_templates
-
- def get_virtual_DNSs(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Domain, self).get_virtual_DNSs()
- if not children: # read it for first time
- obj = svr_conn.domain_read(id = self.uuid, fields = ['virtual_DNSs'])
- children = getattr(obj, 'virtual_DNSs', None)
- self.virtual_DNSs = children
-
- return children
- #end get_virtual_DNSs
-
-
-#end class Domain
-
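The Domain class above shows the pattern used throughout this file: from_dict() rebuilds a client object from a REST response dictionary, and the child getters (get_projects and friends) lazily fetch children through the stored server connection. A rough sketch of that flow, assuming api is an already-connected client handle that exposes domain_read (the handle itself is not part of the removed code):

    # The fq_name/uuid values are arbitrary examples.
    dom = Domain.from_dict(fq_name=['default-domain'],
                           uuid='0797d558-1d98-479e-a941-a05ae88dc159',
                           display_name='default-domain')
    dom.set_server_conn(api)

    # First call goes to the server via api.domain_read(id=..., fields=['projects']);
    # the result is cached on the object, so later calls reuse it.
    print(dom.get_projects())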
-class GlobalVrouterConfig(vnc_api.gen.resource_common.GlobalVrouterConfig):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, linklocal_services = None, encapsulation_priorities = None, vxlan_network_identifier_mode = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if linklocal_services:
- pending_fields.append('linklocal_services')
- if encapsulation_priorities:
- pending_fields.append('encapsulation_priorities')
- if vxlan_network_identifier_mode:
- pending_fields.append('vxlan_network_identifier_mode')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(GlobalVrouterConfig, self).__init__(name, parent_obj, linklocal_services, encapsulation_priorities, vxlan_network_identifier_mode, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'linklocal_services' in kwargs:
- props_dict['linklocal_services'] = vnc_api.gen.resource_xsd.LinklocalServicesTypes(**kwargs['linklocal_services'])
- if 'encapsulation_priorities' in kwargs:
- props_dict['encapsulation_priorities'] = vnc_api.gen.resource_xsd.EncapsulationPrioritiesType(**kwargs['encapsulation_priorities'])
- if 'vxlan_network_identifier_mode' in kwargs:
- props_dict['vxlan_network_identifier_mode'] = kwargs['vxlan_network_identifier_mode']
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = GlobalVrouterConfig(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.GlobalVrouterConfig.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.GlobalVrouterConfig.linklocal_services.setter
- def linklocal_services(self, linklocal_services):
- """Set linklocal-services for global-vrouter-config.
-
- :param linklocal_services: LinklocalServicesTypes object
-
- """
- if 'linklocal_services' not in self._pending_field_updates:
- self._pending_field_updates.add('linklocal_services')
-
- self._linklocal_services = linklocal_services
- #end linklocal_services
-
- def set_linklocal_services(self, value):
- self.linklocal_services = value
- #end set_linklocal_services
-
- @vnc_api.gen.resource_common.GlobalVrouterConfig.encapsulation_priorities.setter
- def encapsulation_priorities(self, encapsulation_priorities):
- """Set encapsulation-priorities for global-vrouter-config.
-
- :param encapsulation_priorities: EncapsulationPrioritiesType object
-
- """
- if 'encapsulation_priorities' not in self._pending_field_updates:
- self._pending_field_updates.add('encapsulation_priorities')
-
- self._encapsulation_priorities = encapsulation_priorities
- #end encapsulation_priorities
-
- def set_encapsulation_priorities(self, value):
- self.encapsulation_priorities = value
- #end set_encapsulation_priorities
-
- @vnc_api.gen.resource_common.GlobalVrouterConfig.vxlan_network_identifier_mode.setter
- def vxlan_network_identifier_mode(self, vxlan_network_identifier_mode):
- """Set vxlan-network-identifier-mode for global-vrouter-config.
-
- :param vxlan_network_identifier_mode: VxlanNetworkIdentifierModeType object
-
- """
- if 'vxlan_network_identifier_mode' not in self._pending_field_updates:
- self._pending_field_updates.add('vxlan_network_identifier_mode')
-
- self._vxlan_network_identifier_mode = vxlan_network_identifier_mode
- #end vxlan_network_identifier_mode
-
- def set_vxlan_network_identifier_mode(self, value):
- self.vxlan_network_identifier_mode = value
- #end set_vxlan_network_identifier_mode
-
- @vnc_api.gen.resource_common.GlobalVrouterConfig.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for global-vrouter-config.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.GlobalVrouterConfig.display_name.setter
- def display_name(self, display_name):
- """Set display-name for global-vrouter-config.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
-
-#end class GlobalVrouterConfig
-
-class InstanceIp(vnc_api.gen.resource_common.InstanceIp):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, instance_ip_address = None, instance_ip_family = None, instance_ip_mode = None, subnet_uuid = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name']
-
- self._server_conn = None
-
- if instance_ip_address:
- pending_fields.append('instance_ip_address')
- if instance_ip_family:
- pending_fields.append('instance_ip_family')
- if instance_ip_mode:
- pending_fields.append('instance_ip_mode')
- if subnet_uuid:
- pending_fields.append('subnet_uuid')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(InstanceIp, self).__init__(name, instance_ip_address, instance_ip_family, instance_ip_mode, subnet_uuid, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'instance_ip_address' in kwargs:
- props_dict['instance_ip_address'] = kwargs['instance_ip_address']
- if 'instance_ip_family' in kwargs:
- props_dict['instance_ip_family'] = kwargs['instance_ip_family']
- if 'instance_ip_mode' in kwargs:
- props_dict['instance_ip_mode'] = kwargs['instance_ip_mode']
- if 'subnet_uuid' in kwargs:
- props_dict['subnet_uuid'] = kwargs['subnet_uuid']
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = InstanceIp(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
- if 'virtual_network_refs' in kwargs:
- obj.virtual_network_refs = kwargs['virtual_network_refs']
- if 'virtual_machine_interface_refs' in kwargs:
- obj.virtual_machine_interface_refs = kwargs['virtual_machine_interface_refs']
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.InstanceIp.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.InstanceIp.instance_ip_address.setter
- def instance_ip_address(self, instance_ip_address):
- """Set instance-ip-address for instance-ip.
-
- :param instance_ip_address: IpAddressType object
-
- """
- if 'instance_ip_address' not in self._pending_field_updates:
- self._pending_field_updates.add('instance_ip_address')
-
- self._instance_ip_address = instance_ip_address
- #end instance_ip_address
-
- def set_instance_ip_address(self, value):
- self.instance_ip_address = value
- #end set_instance_ip_address
-
- @vnc_api.gen.resource_common.InstanceIp.instance_ip_family.setter
- def instance_ip_family(self, instance_ip_family):
- """Set instance-ip-family for instance-ip.
-
- :param instance_ip_family: IpAddressFamilyType object
-
- """
- if 'instance_ip_family' not in self._pending_field_updates:
- self._pending_field_updates.add('instance_ip_family')
-
- self._instance_ip_family = instance_ip_family
- #end instance_ip_family
-
- def set_instance_ip_family(self, value):
- self.instance_ip_family = value
- #end set_instance_ip_family
-
- @vnc_api.gen.resource_common.InstanceIp.instance_ip_mode.setter
- def instance_ip_mode(self, instance_ip_mode):
- """Set instance-ip-mode for instance-ip.
-
- :param instance_ip_mode: AddressMode object
-
- """
- if 'instance_ip_mode' not in self._pending_field_updates:
- self._pending_field_updates.add('instance_ip_mode')
-
- self._instance_ip_mode = instance_ip_mode
- #end instance_ip_mode
-
- def set_instance_ip_mode(self, value):
- self.instance_ip_mode = value
- #end set_instance_ip_mode
-
- @vnc_api.gen.resource_common.InstanceIp.subnet_uuid.setter
- def subnet_uuid(self, subnet_uuid):
- """Set subnet-uuid for instance-ip.
-
- :param subnet_uuid: xsd:string object
-
- """
- if 'subnet_uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('subnet_uuid')
-
- self._subnet_uuid = subnet_uuid
- #end subnet_uuid
-
- def set_subnet_uuid(self, value):
- self.subnet_uuid = value
- #end set_subnet_uuid
-
- @vnc_api.gen.resource_common.InstanceIp.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for instance-ip.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.InstanceIp.display_name.setter
- def display_name(self, display_name):
- """Set display-name for instance-ip.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_virtual_network(self, *args, **kwargs):
- """Set virtual-network for instance-ip.
-
- :param ref_obj: VirtualNetwork object
-
- """
- self._pending_field_updates.add('virtual_network_refs')
- self._pending_ref_updates.discard('virtual_network_refs')
- super(InstanceIp, self).set_virtual_network(*args, **kwargs)
-
- #end set_virtual_network
-
- def add_virtual_network(self, *args, **kwargs):
- """Add virtual-network to instance-ip.
-
- :param ref_obj: VirtualNetwork object
-
- """
- if 'virtual_network_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_network_refs')
- self._original_virtual_network_refs = (self.get_virtual_network_refs() or [])[:]
- super(InstanceIp, self).add_virtual_network(*args, **kwargs)
- #end add_virtual_network
-
- def del_virtual_network(self, *args, **kwargs):
- if 'virtual_network_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_network_refs')
- self._original_virtual_network_refs = (self.get_virtual_network_refs() or [])[:]
- super(InstanceIp, self).del_virtual_network(*args, **kwargs)
- #end del_virtual_network
-
- def set_virtual_network_list(self, *args, **kwargs):
- """Set virtual-network list for instance-ip.
-
- :param ref_obj_list: list of VirtualNetwork object
-
- """
- self._pending_field_updates.add('virtual_network_refs')
- self._pending_ref_updates.discard('virtual_network_refs')
- super(InstanceIp, self).set_virtual_network_list(*args, **kwargs)
- #end set_virtual_network_list
-
- def set_virtual_machine_interface(self, *args, **kwargs):
- """Set virtual-machine-interface for instance-ip.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(InstanceIp, self).set_virtual_machine_interface(*args, **kwargs)
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, *args, **kwargs):
- """Add virtual-machine-interface to instance-ip.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(InstanceIp, self).add_virtual_machine_interface(*args, **kwargs)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, *args, **kwargs):
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(InstanceIp, self).del_virtual_machine_interface(*args, **kwargs)
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, *args, **kwargs):
- """Set virtual-machine-interface list for instance-ip.
-
- :param ref_obj_list: list of VirtualMachineInterface object
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(InstanceIp, self).set_virtual_machine_interface_list(*args, **kwargs)
- #end set_virtual_machine_interface_list
-
-
-#end class InstanceIp
-
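Editor's note: the client classes removed in this hunk all share one change-tracking pattern. Property setters and set_*/set_*_list calls record the field name in _pending_field_updates, while add_*/del_* calls record it in _pending_ref_updates and snapshot the original reference list so the change can later be pushed as a delta. Below is a minimal offline sketch of that bookkeeping using InstanceIp from above; it assumes the classes are importable from vnc_api.gen.resource_client (the usual home of this generated module) and that a VirtualNetwork client class exists alongside them, and the object names are invented.

# Offline sketch of the pending-update bookkeeping; no API server is needed.
# The module path and object names are assumptions; the methods are the ones
# defined in the generated code above.
from vnc_api.gen.resource_client import InstanceIp, VirtualNetwork

iip = InstanceIp(name='demo-iip')
vn = VirtualNetwork(name='demo-vn')

# add_*/del_* record a pending *ref* update and keep a copy of the original
# refs (empty here) so the change can later be sent as a delta.
iip.add_virtual_network(vn)
print('virtual_network_refs' in iip.get_ref_updates())       # True

# set_* replaces the whole ref list and marks it as a pending *field* update.
iip.clear_pending_updates()
iip.set_virtual_network(vn)
print('virtual_network_refs' in iip.get_pending_updates())   # True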
-class NetworkPolicy(vnc_api.gen.resource_common.NetworkPolicy):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, network_policy_entries = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if network_policy_entries:
- pending_fields.append('network_policy_entries')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(NetworkPolicy, self).__init__(name, parent_obj, network_policy_entries, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'network_policy_entries' in kwargs:
- props_dict['network_policy_entries'] = vnc_api.gen.resource_xsd.PolicyEntriesType(**kwargs['network_policy_entries'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = NetworkPolicy(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
-
- # and back references but no obj api for it...
- if 'virtual_network_back_refs' in kwargs:
- obj.virtual_network_back_refs = kwargs['virtual_network_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.NetworkPolicy.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.NetworkPolicy.network_policy_entries.setter
- def network_policy_entries(self, network_policy_entries):
- """Set network-policy-entries for network-policy.
-
- :param network_policy_entries: PolicyEntriesType object
-
- """
- if 'network_policy_entries' not in self._pending_field_updates:
- self._pending_field_updates.add('network_policy_entries')
-
- self._network_policy_entries = network_policy_entries
- #end network_policy_entries
-
- def set_network_policy_entries(self, value):
- self.network_policy_entries = value
- #end set_network_policy_entries
-
- @vnc_api.gen.resource_common.NetworkPolicy.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for network-policy.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.NetworkPolicy.display_name.setter
- def display_name(self, display_name):
- """Set display-name for network-policy.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
-
- def get_virtual_network_back_refs(self):
- """Return list of all virtual-networks using this network-policy"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.network_policy_read(id = self.uuid, fields = ['virtual_network_back_refs'])
- back_refs = getattr(obj, 'virtual_network_back_refs', None)
- self.virtual_network_back_refs = back_refs
-
- return back_refs
- #end get_virtual_network_back_refs
-
-#end class NetworkPolicy
-
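Editor's note: NetworkPolicy (like every class in this file) also carries a from_dict() classmethod that rebuilds a client object from the flat dict the REST layer returns; fq_name and uuid are required by that code path, everything else is optional. The sketch below feeds it a made-up payload and then shows that back-reference getters deliberately return None until set_server_conn() has been called. The module path and all concrete values are assumptions.

from vnc_api.gen.resource_client import NetworkPolicy

# Hypothetical minimal payload; only fq_name and uuid are mandatory here.
payload = {
    'fq_name': ['default-domain', 'default-project', 'demo-policy'],
    'uuid': '00000000-0000-0000-0000-000000000001',
    'parent_type': 'project',
    'display_name': 'demo-policy',
}
policy = NetworkPolicy.from_dict(**payload)

print(policy.uuid)                              # uuid taken from the payload
print(policy.get_pending_updates())             # fields queued for a future write
print(policy.get_virtual_network_back_refs())   # None: no server connection yet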
-class LoadbalancerPool(vnc_api.gen.resource_common.LoadbalancerPool):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, loadbalancer_pool_properties = None, loadbalancer_pool_provider = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if loadbalancer_pool_properties:
- pending_fields.append('loadbalancer_pool_properties')
- if loadbalancer_pool_provider:
- pending_fields.append('loadbalancer_pool_provider')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(LoadbalancerPool, self).__init__(name, parent_obj, loadbalancer_pool_properties, loadbalancer_pool_provider, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'loadbalancer_pool_properties' in kwargs:
- props_dict['loadbalancer_pool_properties'] = vnc_api.gen.resource_xsd.LoadbalancerPoolType(**kwargs['loadbalancer_pool_properties'])
- if 'loadbalancer_pool_provider' in kwargs:
- props_dict['loadbalancer_pool_provider'] = kwargs['loadbalancer_pool_provider']
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = LoadbalancerPool(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
- if 'loadbalancer_members' in kwargs:
- obj.loadbalancer_members = kwargs['loadbalancer_members']
-
- # add any specified references...
- if 'service_instance_refs' in kwargs:
- obj.service_instance_refs = kwargs['service_instance_refs']
- if 'virtual_machine_interface_refs' in kwargs:
- obj.virtual_machine_interface_refs = kwargs['virtual_machine_interface_refs']
- if 'service_appliance_set_refs' in kwargs:
- obj.service_appliance_set_refs = kwargs['service_appliance_set_refs']
- if 'loadbalancer_healthmonitor_refs' in kwargs:
- obj.loadbalancer_healthmonitor_refs = kwargs['loadbalancer_healthmonitor_refs']
-
- # and back references but no obj api for it...
- if 'virtual_ip_back_refs' in kwargs:
- obj.virtual_ip_back_refs = kwargs['virtual_ip_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.LoadbalancerPool.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.LoadbalancerPool.loadbalancer_pool_properties.setter
- def loadbalancer_pool_properties(self, loadbalancer_pool_properties):
- """Set loadbalancer-pool-properties for loadbalancer-pool.
-
- :param loadbalancer_pool_properties: LoadbalancerPoolType object
-
- """
- if 'loadbalancer_pool_properties' not in self._pending_field_updates:
- self._pending_field_updates.add('loadbalancer_pool_properties')
-
- self._loadbalancer_pool_properties = loadbalancer_pool_properties
- #end loadbalancer_pool_properties
-
- def set_loadbalancer_pool_properties(self, value):
- self.loadbalancer_pool_properties = value
- #end set_loadbalancer_pool_properties
-
- @vnc_api.gen.resource_common.LoadbalancerPool.loadbalancer_pool_provider.setter
- def loadbalancer_pool_provider(self, loadbalancer_pool_provider):
- """Set loadbalancer-pool-provider for loadbalancer-pool.
-
- :param loadbalancer_pool_provider: xsd:string object
-
- """
- if 'loadbalancer_pool_provider' not in self._pending_field_updates:
- self._pending_field_updates.add('loadbalancer_pool_provider')
-
- self._loadbalancer_pool_provider = loadbalancer_pool_provider
- #end loadbalancer_pool_provider
-
- def set_loadbalancer_pool_provider(self, value):
- self.loadbalancer_pool_provider = value
- #end set_loadbalancer_pool_provider
-
- @vnc_api.gen.resource_common.LoadbalancerPool.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for loadbalancer-pool.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.LoadbalancerPool.display_name.setter
- def display_name(self, display_name):
- """Set display-name for loadbalancer-pool.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_service_instance(self, *args, **kwargs):
- """Set service-instance for loadbalancer-pool.
-
- :param ref_obj: ServiceInstance object
-
- """
- self._pending_field_updates.add('service_instance_refs')
- self._pending_ref_updates.discard('service_instance_refs')
- super(LoadbalancerPool, self).set_service_instance(*args, **kwargs)
-
- #end set_service_instance
-
- def add_service_instance(self, *args, **kwargs):
- """Add service-instance to loadbalancer-pool.
-
- :param ref_obj: ServiceInstance object
-
- """
- if 'service_instance_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('service_instance_refs')
- self._original_service_instance_refs = (self.get_service_instance_refs() or [])[:]
- super(LoadbalancerPool, self).add_service_instance(*args, **kwargs)
- #end add_service_instance
-
- def del_service_instance(self, *args, **kwargs):
- if 'service_instance_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('service_instance_refs')
- self._original_service_instance_refs = (self.get_service_instance_refs() or [])[:]
- super(LoadbalancerPool, self).del_service_instance(*args, **kwargs)
- #end del_service_instance
-
- def set_service_instance_list(self, *args, **kwargs):
- """Set service-instance list for loadbalancer-pool.
-
- :param ref_obj_list: list of ServiceInstance object
-
- """
- self._pending_field_updates.add('service_instance_refs')
- self._pending_ref_updates.discard('service_instance_refs')
- super(LoadbalancerPool, self).set_service_instance_list(*args, **kwargs)
- #end set_service_instance_list
-
- def set_virtual_machine_interface(self, *args, **kwargs):
- """Set virtual-machine-interface for loadbalancer-pool.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(LoadbalancerPool, self).set_virtual_machine_interface(*args, **kwargs)
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, *args, **kwargs):
- """Add virtual-machine-interface to loadbalancer-pool.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(LoadbalancerPool, self).add_virtual_machine_interface(*args, **kwargs)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, *args, **kwargs):
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(LoadbalancerPool, self).del_virtual_machine_interface(*args, **kwargs)
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, *args, **kwargs):
- """Set virtual-machine-interface list for loadbalancer-pool.
-
- :param ref_obj_list: list of VirtualMachineInterface object
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(LoadbalancerPool, self).set_virtual_machine_interface_list(*args, **kwargs)
- #end set_virtual_machine_interface_list
-
- def set_service_appliance_set(self, *args, **kwargs):
- """Set service-appliance-set for loadbalancer-pool.
-
- :param ref_obj: ServiceApplianceSet object
-
- """
- self._pending_field_updates.add('service_appliance_set_refs')
- self._pending_ref_updates.discard('service_appliance_set_refs')
- super(LoadbalancerPool, self).set_service_appliance_set(*args, **kwargs)
-
- #end set_service_appliance_set
-
- def add_service_appliance_set(self, *args, **kwargs):
- """Add service-appliance-set to loadbalancer-pool.
-
- :param ref_obj: ServiceApplianceSet object
-
- """
- if 'service_appliance_set_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('service_appliance_set_refs')
- self._original_service_appliance_set_refs = (self.get_service_appliance_set_refs() or [])[:]
- super(LoadbalancerPool, self).add_service_appliance_set(*args, **kwargs)
- #end add_service_appliance_set
-
- def del_service_appliance_set(self, *args, **kwargs):
- if 'service_appliance_set_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('service_appliance_set_refs')
- self._original_service_appliance_set_refs = (self.get_service_appliance_set_refs() or [])[:]
- super(LoadbalancerPool, self).del_service_appliance_set(*args, **kwargs)
- #end del_service_appliance_set
-
- def set_service_appliance_set_list(self, *args, **kwargs):
- """Set service-appliance-set list for loadbalancer-pool.
-
- :param ref_obj_list: list of ServiceApplianceSet object
-
- """
- self._pending_field_updates.add('service_appliance_set_refs')
- self._pending_ref_updates.discard('service_appliance_set_refs')
- super(LoadbalancerPool, self).set_service_appliance_set_list(*args, **kwargs)
- #end set_service_appliance_set_list
-
- def set_loadbalancer_healthmonitor(self, *args, **kwargs):
- """Set loadbalancer-healthmonitor for loadbalancer-pool.
-
- :param ref_obj: LoadbalancerHealthmonitor object
-
- """
- self._pending_field_updates.add('loadbalancer_healthmonitor_refs')
- self._pending_ref_updates.discard('loadbalancer_healthmonitor_refs')
- super(LoadbalancerPool, self).set_loadbalancer_healthmonitor(*args, **kwargs)
-
- #end set_loadbalancer_healthmonitor
-
- def add_loadbalancer_healthmonitor(self, *args, **kwargs):
- """Add loadbalancer-healthmonitor to loadbalancer-pool.
-
- :param ref_obj: LoadbalancerHealthmonitor object
-
- """
- if 'loadbalancer_healthmonitor_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('loadbalancer_healthmonitor_refs')
- self._original_loadbalancer_healthmonitor_refs = (self.get_loadbalancer_healthmonitor_refs() or [])[:]
- super(LoadbalancerPool, self).add_loadbalancer_healthmonitor(*args, **kwargs)
- #end add_loadbalancer_healthmonitor
-
- def del_loadbalancer_healthmonitor(self, *args, **kwargs):
- if 'loadbalancer_healthmonitor_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('loadbalancer_healthmonitor_refs')
- self._original_loadbalancer_healthmonitor_refs = (self.get_loadbalancer_healthmonitor_refs() or [])[:]
- super(LoadbalancerPool, self).del_loadbalancer_healthmonitor(*args, **kwargs)
- #end del_loadbalancer_healthmonitor
-
- def set_loadbalancer_healthmonitor_list(self, *args, **kwargs):
- """Set loadbalancer-healthmonitor list for loadbalancer-pool.
-
- :param ref_obj_list: list of LoadbalancerHealthmonitor object
-
- """
- self._pending_field_updates.add('loadbalancer_healthmonitor_refs')
- self._pending_ref_updates.discard('loadbalancer_healthmonitor_refs')
- super(LoadbalancerPool, self).set_loadbalancer_healthmonitor_list(*args, **kwargs)
- #end set_loadbalancer_healthmonitor_list
-
- def get_loadbalancer_members(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(LoadbalancerPool, self).get_loadbalancer_members()
- if not children: # read it for first time
- obj = svr_conn.loadbalancer_pool_read(id = self.uuid, fields = ['loadbalancer_members'])
- children = getattr(obj, 'loadbalancer_members', None)
- self.loadbalancer_members = children
-
- return children
- #end get_loadbalancer_members
-
-
- def get_virtual_ip_back_refs(self):
- """Return list of all virtual-ips using this loadbalancer-pool"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.loadbalancer_pool_read(id = self.uuid, fields = ['virtual_ip_back_refs'])
- back_refs = getattr(obj, 'virtual_ip_back_refs', None)
- self.virtual_ip_back_refs = back_refs
-
- return back_refs
- #end get_virtual_ip_back_refs
-
-#end class LoadbalancerPool
-
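Editor's note: LoadbalancerPool shows the other half of the pattern. Children (loadbalancer_members) and back-refs (virtual_ip_back_refs) are not populated locally; they are fetched on demand through the stored server connection. A sketch of that flow follows; it assumes a reachable API server and an existing pool, and the VncApi endpoint and UUID are placeholders. loadbalancer_pool_read() is the same call the generated getters issue internally.

from vnc_api import vnc_api

# Hypothetical connection; host and credentials depend on the deployment.
conn = vnc_api.VncApi(api_server_host='127.0.0.1')
pool = conn.loadbalancer_pool_read(id='11111111-1111-1111-1111-111111111111')

# Objects read through the library carry the connection.
# get_loadbalancer_members() reads the children once and caches them on the
# object; get_virtual_ip_back_refs() issues a fresh
# fields=['virtual_ip_back_refs'] read on each call.
print(pool.get_loadbalancer_members())
print(pool.get_virtual_ip_back_refs())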
-class VirtualDnsRecord(vnc_api.gen.resource_common.VirtualDnsRecord):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, virtual_DNS_record_data = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if virtual_DNS_record_data:
- pending_fields.append('virtual_DNS_record_data')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(VirtualDnsRecord, self).__init__(name, parent_obj, virtual_DNS_record_data, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'virtual_DNS_record_data' in kwargs:
- props_dict['virtual_DNS_record_data'] = vnc_api.gen.resource_xsd.VirtualDnsRecordType(**kwargs['virtual_DNS_record_data'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = VirtualDnsRecord(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.VirtualDnsRecord.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.VirtualDnsRecord.virtual_DNS_record_data.setter
- def virtual_DNS_record_data(self, virtual_DNS_record_data):
- """Set virtual-DNS-record-data for virtual-DNS-record.
-
- :param virtual_DNS_record_data: VirtualDnsRecordType object
-
- """
- if 'virtual_DNS_record_data' not in self._pending_field_updates:
- self._pending_field_updates.add('virtual_DNS_record_data')
-
- self._virtual_DNS_record_data = virtual_DNS_record_data
- #end virtual_DNS_record_data
-
- def set_virtual_DNS_record_data(self, value):
- self.virtual_DNS_record_data = value
- #end set_virtual_DNS_record_data
-
- @vnc_api.gen.resource_common.VirtualDnsRecord.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for virtual-DNS-record.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.VirtualDnsRecord.display_name.setter
- def display_name(self, display_name):
- """Set display-name for virtual-DNS-record.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
-
-#end class VirtualDnsRecord
-
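Editor's note: the constructors above only queue truthy arguments, so a property left at its default (or passed an empty value) is not marked pending for the initial create, while any later assignment through the property or set_* method is. This is illustrated below for VirtualDnsRecord; VirtualDnsRecordType comes from vnc_api.gen.resource_xsd as in from_dict() above, but its field names and the record values are assumptions, since only the type name appears in this hunk.

from vnc_api.gen.resource_client import VirtualDnsRecord
from vnc_api.gen.resource_xsd import VirtualDnsRecordType

# Assumed VirtualDnsRecordType fields; the values are illustrative only.
rec_data = VirtualDnsRecordType(record_name='host1', record_type='A',
                                record_class='IN', record_data='203.0.113.5',
                                record_ttl_seconds=86400)

rec = VirtualDnsRecord(name='host1-rec', virtual_DNS_record_data=rec_data)
print('virtual_DNS_record_data' in rec.get_pending_updates())   # True: truthy ctor arg
print('display_name' in rec.get_pending_updates())              # False: left at None

rec.set_display_name('host1 record')
print('display_name' in rec.get_pending_updates())              # True after the setter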
-class RouteTarget(vnc_api.gen.resource_common.RouteTarget):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name']
-
- self._server_conn = None
-
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(RouteTarget, self).__init__(name, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = RouteTarget(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
-
- # and back references but no obj api for it...
- if 'logical_router_back_refs' in kwargs:
- obj.logical_router_back_refs = kwargs['logical_router_back_refs']
- if 'routing_instance_back_refs' in kwargs:
- obj.routing_instance_back_refs = kwargs['routing_instance_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.RouteTarget.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.RouteTarget.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for route-target.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.RouteTarget.display_name.setter
- def display_name(self, display_name):
- """Set display-name for route-target.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
-
- def get_logical_router_back_refs(self):
- """Return list of all logical-routers using this route-target"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.route_target_read(id = self.uuid, fields = ['logical_router_back_refs'])
- back_refs = getattr(obj, 'logical_router_back_refs', None)
- self.logical_router_back_refs = back_refs
-
- return back_refs
- #end get_logical_router_back_refs
-
- def get_routing_instance_back_refs(self):
- """Return list of all routing-instances using this route-target"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.route_target_read(id = self.uuid, fields = ['routing_instance_back_refs'])
- back_refs = getattr(obj, 'routing_instance_back_refs', None)
- self.routing_instance_back_refs = back_refs
-
- return back_refs
- #end get_routing_instance_back_refs
-
-#end class RouteTarget
-
-class FloatingIp(vnc_api.gen.resource_common.FloatingIp):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, floating_ip_address = None, floating_ip_is_virtual_ip = None, floating_ip_fixed_ip_address = None, floating_ip_address_family = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if floating_ip_address:
- pending_fields.append('floating_ip_address')
- if floating_ip_is_virtual_ip:
- pending_fields.append('floating_ip_is_virtual_ip')
- if floating_ip_fixed_ip_address:
- pending_fields.append('floating_ip_fixed_ip_address')
- if floating_ip_address_family:
- pending_fields.append('floating_ip_address_family')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(FloatingIp, self).__init__(name, parent_obj, floating_ip_address, floating_ip_is_virtual_ip, floating_ip_fixed_ip_address, floating_ip_address_family, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'floating_ip_address' in kwargs:
- props_dict['floating_ip_address'] = kwargs['floating_ip_address']
- if 'floating_ip_is_virtual_ip' in kwargs:
- props_dict['floating_ip_is_virtual_ip'] = kwargs['floating_ip_is_virtual_ip']
- if 'floating_ip_fixed_ip_address' in kwargs:
- props_dict['floating_ip_fixed_ip_address'] = kwargs['floating_ip_fixed_ip_address']
- if 'floating_ip_address_family' in kwargs:
- props_dict['floating_ip_address_family'] = kwargs['floating_ip_address_family']
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = FloatingIp(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
- if 'project_refs' in kwargs:
- obj.project_refs = kwargs['project_refs']
- if 'virtual_machine_interface_refs' in kwargs:
- obj.virtual_machine_interface_refs = kwargs['virtual_machine_interface_refs']
-
- # and back references but no obj api for it...
- if 'customer_attachment_back_refs' in kwargs:
- obj.customer_attachment_back_refs = kwargs['customer_attachment_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.FloatingIp.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.FloatingIp.floating_ip_address.setter
- def floating_ip_address(self, floating_ip_address):
- """Set floating-ip-address for floating-ip.
-
- :param floating_ip_address: IpAddressType object
-
- """
- if 'floating_ip_address' not in self._pending_field_updates:
- self._pending_field_updates.add('floating_ip_address')
-
- self._floating_ip_address = floating_ip_address
- #end floating_ip_address
-
- def set_floating_ip_address(self, value):
- self.floating_ip_address = value
- #end set_floating_ip_address
-
- @vnc_api.gen.resource_common.FloatingIp.floating_ip_is_virtual_ip.setter
- def floating_ip_is_virtual_ip(self, floating_ip_is_virtual_ip):
- """Set floating-ip-is-virtual-ip for floating-ip.
-
- :param floating_ip_is_virtual_ip: xsd:boolean object
-
- """
- if 'floating_ip_is_virtual_ip' not in self._pending_field_updates:
- self._pending_field_updates.add('floating_ip_is_virtual_ip')
-
- self._floating_ip_is_virtual_ip = floating_ip_is_virtual_ip
- #end floating_ip_is_virtual_ip
-
- def set_floating_ip_is_virtual_ip(self, value):
- self.floating_ip_is_virtual_ip = value
- #end set_floating_ip_is_virtual_ip
-
- @vnc_api.gen.resource_common.FloatingIp.floating_ip_fixed_ip_address.setter
- def floating_ip_fixed_ip_address(self, floating_ip_fixed_ip_address):
- """Set floating-ip-fixed-ip-address for floating-ip.
-
- :param floating_ip_fixed_ip_address: IpAddressType object
-
- """
- if 'floating_ip_fixed_ip_address' not in self._pending_field_updates:
- self._pending_field_updates.add('floating_ip_fixed_ip_address')
-
- self._floating_ip_fixed_ip_address = floating_ip_fixed_ip_address
- #end floating_ip_fixed_ip_address
-
- def set_floating_ip_fixed_ip_address(self, value):
- self.floating_ip_fixed_ip_address = value
- #end set_floating_ip_fixed_ip_address
-
- @vnc_api.gen.resource_common.FloatingIp.floating_ip_address_family.setter
- def floating_ip_address_family(self, floating_ip_address_family):
- """Set floating-ip-address-family for floating-ip.
-
- :param floating_ip_address_family: IpAddressFamilyType object
-
- """
- if 'floating_ip_address_family' not in self._pending_field_updates:
- self._pending_field_updates.add('floating_ip_address_family')
-
- self._floating_ip_address_family = floating_ip_address_family
- #end floating_ip_address_family
-
- def set_floating_ip_address_family(self, value):
- self.floating_ip_address_family = value
- #end set_floating_ip_address_family
-
- @vnc_api.gen.resource_common.FloatingIp.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for floating-ip.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.FloatingIp.display_name.setter
- def display_name(self, display_name):
- """Set display-name for floating-ip.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_project(self, *args, **kwargs):
- """Set project for floating-ip.
-
- :param ref_obj: Project object
-
- """
- self._pending_field_updates.add('project_refs')
- self._pending_ref_updates.discard('project_refs')
- super(FloatingIp, self).set_project(*args, **kwargs)
-
- #end set_project
-
- def add_project(self, *args, **kwargs):
- """Add project to floating-ip.
-
- :param ref_obj: Project object
-
- """
- if 'project_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('project_refs')
- self._original_project_refs = (self.get_project_refs() or [])[:]
- super(FloatingIp, self).add_project(*args, **kwargs)
- #end add_project
-
- def del_project(self, *args, **kwargs):
- if 'project_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('project_refs')
- self._original_project_refs = (self.get_project_refs() or [])[:]
- super(FloatingIp, self).del_project(*args, **kwargs)
- #end del_project
-
- def set_project_list(self, *args, **kwargs):
- """Set project list for floating-ip.
-
- :param ref_obj_list: list of Project object
-
- """
- self._pending_field_updates.add('project_refs')
- self._pending_ref_updates.discard('project_refs')
- super(FloatingIp, self).set_project_list(*args, **kwargs)
- #end set_project_list
-
- def set_virtual_machine_interface(self, *args, **kwargs):
- """Set virtual-machine-interface for floating-ip.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(FloatingIp, self).set_virtual_machine_interface(*args, **kwargs)
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, *args, **kwargs):
- """Add virtual-machine-interface to floating-ip.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(FloatingIp, self).add_virtual_machine_interface(*args, **kwargs)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, *args, **kwargs):
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(FloatingIp, self).del_virtual_machine_interface(*args, **kwargs)
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, *args, **kwargs):
- """Set virtual-machine-interface list for floating-ip.
-
- :param ref_obj_list: list of VirtualMachineInterface object
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(FloatingIp, self).set_virtual_machine_interface_list(*args, **kwargs)
- #end set_virtual_machine_interface_list
-
-
- def get_customer_attachment_back_refs(self):
- """Return list of all customer-attachments using this floating-ip"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.floating_ip_read(id = self.uuid, fields = ['customer_attachment_back_refs'])
- back_refs = getattr(obj, 'customer_attachment_back_refs', None)
- self.customer_attachment_back_refs = back_refs
-
- return back_refs
- #end get_customer_attachment_back_refs
-
-#end class FloatingIp
-
-class FloatingIpPool(vnc_api.gen.resource_common.FloatingIpPool):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, floating_ip_pool_prefixes = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if floating_ip_pool_prefixes:
- pending_fields.append('floating_ip_pool_prefixes')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(FloatingIpPool, self).__init__(name, parent_obj, floating_ip_pool_prefixes, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'floating_ip_pool_prefixes' in kwargs:
- props_dict['floating_ip_pool_prefixes'] = vnc_api.gen.resource_xsd.FloatingIpPoolType(**kwargs['floating_ip_pool_prefixes'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = FloatingIpPool(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
- if 'floating_ips' in kwargs:
- obj.floating_ips = kwargs['floating_ips']
-
- # add any specified references...
-
- # and back references but no obj api for it...
- if 'project_back_refs' in kwargs:
- obj.project_back_refs = kwargs['project_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.FloatingIpPool.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.FloatingIpPool.floating_ip_pool_prefixes.setter
- def floating_ip_pool_prefixes(self, floating_ip_pool_prefixes):
- """Set floating-ip-pool-prefixes for floating-ip-pool.
-
- :param floating_ip_pool_prefixes: FloatingIpPoolType object
-
- """
- if 'floating_ip_pool_prefixes' not in self._pending_field_updates:
- self._pending_field_updates.add('floating_ip_pool_prefixes')
-
- self._floating_ip_pool_prefixes = floating_ip_pool_prefixes
- #end floating_ip_pool_prefixes
-
- def set_floating_ip_pool_prefixes(self, value):
- self.floating_ip_pool_prefixes = value
- #end set_floating_ip_pool_prefixes
-
- @vnc_api.gen.resource_common.FloatingIpPool.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for floating-ip-pool.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.FloatingIpPool.display_name.setter
- def display_name(self, display_name):
- """Set display-name for floating-ip-pool.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_floating_ips(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(FloatingIpPool, self).get_floating_ips()
- if not children: # read it for first time
- obj = svr_conn.floating_ip_pool_read(id = self.uuid, fields = ['floating_ips'])
- children = getattr(obj, 'floating_ips', None)
- self.floating_ips = children
-
- return children
- #end get_floating_ips
-
-
- def get_project_back_refs(self):
- """Return list of all projects using this floating-ip-pool"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.floating_ip_pool_read(id = self.uuid, fields = ['project_back_refs'])
- back_refs = getattr(obj, 'project_back_refs', None)
- self.project_back_refs = back_refs
-
- return back_refs
- #end get_project_back_refs
-
-#end class FloatingIpPool
-
-class PhysicalRouter(vnc_api.gen.resource_common.PhysicalRouter):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, physical_router_management_ip = None, physical_router_dataplane_ip = None, physical_router_vendor_name = None, physical_router_product_name = None, physical_router_vnc_managed = None, physical_router_user_credentials = None, physical_router_snmp_credentials = None, physical_router_junos_service_ports = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if physical_router_management_ip:
- pending_fields.append('physical_router_management_ip')
- if physical_router_dataplane_ip:
- pending_fields.append('physical_router_dataplane_ip')
- if physical_router_vendor_name:
- pending_fields.append('physical_router_vendor_name')
- if physical_router_product_name:
- pending_fields.append('physical_router_product_name')
- if physical_router_vnc_managed:
- pending_fields.append('physical_router_vnc_managed')
- if physical_router_user_credentials:
- pending_fields.append('physical_router_user_credentials')
- if physical_router_snmp_credentials:
- pending_fields.append('physical_router_snmp_credentials')
- if physical_router_junos_service_ports:
- pending_fields.append('physical_router_junos_service_ports')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(PhysicalRouter, self).__init__(name, parent_obj, physical_router_management_ip, physical_router_dataplane_ip, physical_router_vendor_name, physical_router_product_name, physical_router_vnc_managed, physical_router_user_credentials, physical_router_snmp_credentials, physical_router_junos_service_ports, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'physical_router_management_ip' in kwargs:
- props_dict['physical_router_management_ip'] = kwargs['physical_router_management_ip']
- if 'physical_router_dataplane_ip' in kwargs:
- props_dict['physical_router_dataplane_ip'] = kwargs['physical_router_dataplane_ip']
- if 'physical_router_vendor_name' in kwargs:
- props_dict['physical_router_vendor_name'] = kwargs['physical_router_vendor_name']
- if 'physical_router_product_name' in kwargs:
- props_dict['physical_router_product_name'] = kwargs['physical_router_product_name']
- if 'physical_router_vnc_managed' in kwargs:
- props_dict['physical_router_vnc_managed'] = kwargs['physical_router_vnc_managed']
- if 'physical_router_user_credentials' in kwargs:
- props_dict['physical_router_user_credentials'] = vnc_api.gen.resource_xsd.UserCredentials(**kwargs['physical_router_user_credentials'])
- if 'physical_router_snmp_credentials' in kwargs:
- props_dict['physical_router_snmp_credentials'] = vnc_api.gen.resource_xsd.SNMPCredentials(**kwargs['physical_router_snmp_credentials'])
- if 'physical_router_junos_service_ports' in kwargs:
- props_dict['physical_router_junos_service_ports'] = vnc_api.gen.resource_xsd.JunosServicePorts(**kwargs['physical_router_junos_service_ports'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = PhysicalRouter(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
- if 'physical_interfaces' in kwargs:
- obj.physical_interfaces = kwargs['physical_interfaces']
- if 'logical_interfaces' in kwargs:
- obj.logical_interfaces = kwargs['logical_interfaces']
-
- # add any specified references...
- if 'virtual_router_refs' in kwargs:
- obj.virtual_router_refs = kwargs['virtual_router_refs']
- if 'bgp_router_refs' in kwargs:
- obj.bgp_router_refs = kwargs['bgp_router_refs']
- if 'virtual_network_refs' in kwargs:
- obj.virtual_network_refs = kwargs['virtual_network_refs']
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.PhysicalRouter.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.PhysicalRouter.physical_router_management_ip.setter
- def physical_router_management_ip(self, physical_router_management_ip):
- """Set physical-router-management-ip for physical-router.
-
- :param physical_router_management_ip: IpAddress object
-
- """
- if 'physical_router_management_ip' not in self._pending_field_updates:
- self._pending_field_updates.add('physical_router_management_ip')
-
- self._physical_router_management_ip = physical_router_management_ip
- #end physical_router_management_ip
-
- def set_physical_router_management_ip(self, value):
- self.physical_router_management_ip = value
- #end set_physical_router_management_ip
-
- @vnc_api.gen.resource_common.PhysicalRouter.physical_router_dataplane_ip.setter
- def physical_router_dataplane_ip(self, physical_router_dataplane_ip):
- """Set physical-router-dataplane-ip for physical-router.
-
- :param physical_router_dataplane_ip: IpAddress object
-
- """
- if 'physical_router_dataplane_ip' not in self._pending_field_updates:
- self._pending_field_updates.add('physical_router_dataplane_ip')
-
- self._physical_router_dataplane_ip = physical_router_dataplane_ip
- #end physical_router_dataplane_ip
-
- def set_physical_router_dataplane_ip(self, value):
- self.physical_router_dataplane_ip = value
- #end set_physical_router_dataplane_ip
-
- @vnc_api.gen.resource_common.PhysicalRouter.physical_router_vendor_name.setter
- def physical_router_vendor_name(self, physical_router_vendor_name):
- """Set physical-router-vendor-name for physical-router.
-
- :param physical_router_vendor_name: xsd:string object
-
- """
- if 'physical_router_vendor_name' not in self._pending_field_updates:
- self._pending_field_updates.add('physical_router_vendor_name')
-
- self._physical_router_vendor_name = physical_router_vendor_name
- #end physical_router_vendor_name
-
- def set_physical_router_vendor_name(self, value):
- self.physical_router_vendor_name = value
- #end set_physical_router_vendor_name
-
- @vnc_api.gen.resource_common.PhysicalRouter.physical_router_product_name.setter
- def physical_router_product_name(self, physical_router_product_name):
- """Set physical-router-product-name for physical-router.
-
- :param physical_router_product_name: xsd:string object
-
- """
- if 'physical_router_product_name' not in self._pending_field_updates:
- self._pending_field_updates.add('physical_router_product_name')
-
- self._physical_router_product_name = physical_router_product_name
- #end physical_router_product_name
-
- def set_physical_router_product_name(self, value):
- self.physical_router_product_name = value
- #end set_physical_router_product_name
-
- @vnc_api.gen.resource_common.PhysicalRouter.physical_router_vnc_managed.setter
- def physical_router_vnc_managed(self, physical_router_vnc_managed):
- """Set physical-router-vnc-managed for physical-router.
-
- :param physical_router_vnc_managed: xsd:boolean object
-
- """
- if 'physical_router_vnc_managed' not in self._pending_field_updates:
- self._pending_field_updates.add('physical_router_vnc_managed')
-
- self._physical_router_vnc_managed = physical_router_vnc_managed
- #end physical_router_vnc_managed
-
- def set_physical_router_vnc_managed(self, value):
- self.physical_router_vnc_managed = value
- #end set_physical_router_vnc_managed
-
- @vnc_api.gen.resource_common.PhysicalRouter.physical_router_user_credentials.setter
- def physical_router_user_credentials(self, physical_router_user_credentials):
- """Set physical-router-user-credentials for physical-router.
-
- :param physical_router_user_credentials: UserCredentials object
-
- """
- if 'physical_router_user_credentials' not in self._pending_field_updates:
- self._pending_field_updates.add('physical_router_user_credentials')
-
- self._physical_router_user_credentials = physical_router_user_credentials
- #end physical_router_user_credentials
-
- def set_physical_router_user_credentials(self, value):
- self.physical_router_user_credentials = value
- #end set_physical_router_user_credentials
-
- @vnc_api.gen.resource_common.PhysicalRouter.physical_router_snmp_credentials.setter
- def physical_router_snmp_credentials(self, physical_router_snmp_credentials):
- """Set physical-router-snmp-credentials for physical-router.
-
- :param physical_router_snmp_credentials: SNMPCredentials object
-
- """
- if 'physical_router_snmp_credentials' not in self._pending_field_updates:
- self._pending_field_updates.add('physical_router_snmp_credentials')
-
- self._physical_router_snmp_credentials = physical_router_snmp_credentials
- #end physical_router_snmp_credentials
-
- def set_physical_router_snmp_credentials(self, value):
- self.physical_router_snmp_credentials = value
- #end set_physical_router_snmp_credentials
-
- @vnc_api.gen.resource_common.PhysicalRouter.physical_router_junos_service_ports.setter
- def physical_router_junos_service_ports(self, physical_router_junos_service_ports):
- """Set physical-router-junos-service-ports for physical-router.
-
- :param physical_router_junos_service_ports: JunosServicePorts object
-
- """
- if 'physical_router_junos_service_ports' not in self._pending_field_updates:
- self._pending_field_updates.add('physical_router_junos_service_ports')
-
- self._physical_router_junos_service_ports = physical_router_junos_service_ports
- #end physical_router_junos_service_ports
-
- def set_physical_router_junos_service_ports(self, value):
- self.physical_router_junos_service_ports = value
- #end set_physical_router_junos_service_ports
-
- @vnc_api.gen.resource_common.PhysicalRouter.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for physical-router.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.PhysicalRouter.display_name.setter
- def display_name(self, display_name):
- """Set display-name for physical-router.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_virtual_router(self, *args, **kwargs):
- """Set virtual-router for physical-router.
-
- :param ref_obj: VirtualRouter object
-
- """
- self._pending_field_updates.add('virtual_router_refs')
- self._pending_ref_updates.discard('virtual_router_refs')
- super(PhysicalRouter, self).set_virtual_router(*args, **kwargs)
-
- #end set_virtual_router
-
- def add_virtual_router(self, *args, **kwargs):
- """Add virtual-router to physical-router.
-
- :param ref_obj: VirtualRouter object
-
- """
- if 'virtual_router_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_router_refs')
- self._original_virtual_router_refs = (self.get_virtual_router_refs() or [])[:]
- super(PhysicalRouter, self).add_virtual_router(*args, **kwargs)
- #end add_virtual_router
-
- def del_virtual_router(self, *args, **kwargs):
- if 'virtual_router_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_router_refs')
- self._original_virtual_router_refs = (self.get_virtual_router_refs() or [])[:]
- super(PhysicalRouter, self).del_virtual_router(*args, **kwargs)
- #end del_virtual_router
-
- def set_virtual_router_list(self, *args, **kwargs):
- """Set virtual-router list for physical-router.
-
- :param ref_obj_list: list of VirtualRouter object
-
- """
- self._pending_field_updates.add('virtual_router_refs')
- self._pending_ref_updates.discard('virtual_router_refs')
- super(PhysicalRouter, self).set_virtual_router_list(*args, **kwargs)
- #end set_virtual_router_list
-
- def set_bgp_router(self, *args, **kwargs):
- """Set bgp-router for physical-router.
-
- :param ref_obj: BgpRouter object
-
- """
- self._pending_field_updates.add('bgp_router_refs')
- self._pending_ref_updates.discard('bgp_router_refs')
- super(PhysicalRouter, self).set_bgp_router(*args, **kwargs)
-
- #end set_bgp_router
-
- def add_bgp_router(self, *args, **kwargs):
- """Add bgp-router to physical-router.
-
- :param ref_obj: BgpRouter object
-
- """
- if 'bgp_router_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('bgp_router_refs')
- self._original_bgp_router_refs = (self.get_bgp_router_refs() or [])[:]
- super(PhysicalRouter, self).add_bgp_router(*args, **kwargs)
- #end add_bgp_router
-
- def del_bgp_router(self, *args, **kwargs):
- if 'bgp_router_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('bgp_router_refs')
- self._original_bgp_router_refs = (self.get_bgp_router_refs() or [])[:]
- super(PhysicalRouter, self).del_bgp_router(*args, **kwargs)
- #end del_bgp_router
-
- def set_bgp_router_list(self, *args, **kwargs):
- """Set bgp-router list for physical-router.
-
- :param ref_obj_list: list of BgpRouter object
-
- """
- self._pending_field_updates.add('bgp_router_refs')
- self._pending_ref_updates.discard('bgp_router_refs')
- super(PhysicalRouter, self).set_bgp_router_list(*args, **kwargs)
- #end set_bgp_router_list
-
- def set_virtual_network(self, *args, **kwargs):
- """Set virtual-network for physical-router.
-
- :param ref_obj: VirtualNetwork object
-
- """
- self._pending_field_updates.add('virtual_network_refs')
- self._pending_ref_updates.discard('virtual_network_refs')
- super(PhysicalRouter, self).set_virtual_network(*args, **kwargs)
-
- #end set_virtual_network
-
- def add_virtual_network(self, *args, **kwargs):
- """Add virtual-network to physical-router.
-
- :param ref_obj: VirtualNetwork object
-
- """
- if 'virtual_network_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_network_refs')
- self._original_virtual_network_refs = (self.get_virtual_network_refs() or [])[:]
- super(PhysicalRouter, self).add_virtual_network(*args, **kwargs)
- #end add_virtual_network
-
- def del_virtual_network(self, *args, **kwargs):
- if 'virtual_network_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_network_refs')
- self._original_virtual_network_refs = (self.get_virtual_network_refs() or [])[:]
- super(PhysicalRouter, self).del_virtual_network(*args, **kwargs)
- #end del_virtual_network
-
- def set_virtual_network_list(self, *args, **kwargs):
- """Set virtual-network list for physical-router.
-
- :param ref_obj_list: list of VirtualNetwork object
-
- """
- self._pending_field_updates.add('virtual_network_refs')
- self._pending_ref_updates.discard('virtual_network_refs')
- super(PhysicalRouter, self).set_virtual_network_list(*args, **kwargs)
- #end set_virtual_network_list
-
- def get_physical_interfaces(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(PhysicalRouter, self).get_physical_interfaces()
- if not children: # read it for first time
- obj = svr_conn.physical_router_read(id = self.uuid, fields = ['physical_interfaces'])
- children = getattr(obj, 'physical_interfaces', None)
- self.physical_interfaces = children
-
- return children
- #end get_physical_interfaces
-
- def get_logical_interfaces(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(PhysicalRouter, self).get_logical_interfaces()
- if not children: # read it for first time
- obj = svr_conn.physical_router_read(id = self.uuid, fields = ['logical_interfaces'])
- children = getattr(obj, 'logical_interfaces', None)
- self.logical_interfaces = children
-
- return children
- #end get_logical_interfaces
-
-
-#end class PhysicalRouter
-
-class BgpRouter(vnc_api.gen.resource_common.BgpRouter):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, bgp_router_parameters = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if bgp_router_parameters:
- pending_fields.append('bgp_router_parameters')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(BgpRouter, self).__init__(name, parent_obj, bgp_router_parameters, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'bgp_router_parameters' in kwargs:
- props_dict['bgp_router_parameters'] = vnc_api.gen.resource_xsd.BgpRouterParams(**kwargs['bgp_router_parameters'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = BgpRouter(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
- if 'bgp_router_refs' in kwargs:
- obj.bgp_router_refs = kwargs['bgp_router_refs']
- for ref in obj.bgp_router_refs:
- ref['attr'] = vnc_api.gen.resource_xsd.BgpPeeringAttributes(**ref['attr'])
-
- # and back references but no obj api for it...
- if 'global_system_config_back_refs' in kwargs:
- obj.global_system_config_back_refs = kwargs['global_system_config_back_refs']
- if 'physical_router_back_refs' in kwargs:
- obj.physical_router_back_refs = kwargs['physical_router_back_refs']
- if 'virtual_router_back_refs' in kwargs:
- obj.virtual_router_back_refs = kwargs['virtual_router_back_refs']
- if 'bgp_router_back_refs' in kwargs:
- obj.bgp_router_back_refs = kwargs['bgp_router_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.BgpRouter.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.BgpRouter.bgp_router_parameters.setter
- def bgp_router_parameters(self, bgp_router_parameters):
- """Set bgp-router-parameters for bgp-router.
-
- :param bgp_router_parameters: BgpRouterParams object
-
- """
- if 'bgp_router_parameters' not in self._pending_field_updates:
- self._pending_field_updates.add('bgp_router_parameters')
-
- self._bgp_router_parameters = bgp_router_parameters
- #end bgp_router_parameters
-
- def set_bgp_router_parameters(self, value):
- self.bgp_router_parameters = value
- #end set_bgp_router_parameters
-
- @vnc_api.gen.resource_common.BgpRouter.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for bgp-router.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.BgpRouter.display_name.setter
- def display_name(self, display_name):
- """Set display-name for bgp-router.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_bgp_router(self, *args, **kwargs):
- """Set bgp-router for bgp-router.
-
- :param ref_obj: BgpRouter object
- :param ref_data: BgpPeeringAttributes object
-
- """
- self._pending_field_updates.add('bgp_router_refs')
- self._pending_ref_updates.discard('bgp_router_refs')
- super(BgpRouter, self).set_bgp_router(*args, **kwargs)
-
- #end set_bgp_router
-
- def add_bgp_router(self, *args, **kwargs):
- """Add bgp-router to bgp-router.
-
- :param ref_obj: BgpRouter object
- :param ref_data: BgpPeeringAttributes object
-
- """
- if 'bgp_router_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('bgp_router_refs')
- self._original_bgp_router_refs = (self.get_bgp_router_refs() or [])[:]
- super(BgpRouter, self).add_bgp_router(*args, **kwargs)
- #end add_bgp_router
-
- def del_bgp_router(self, *args, **kwargs):
- if 'bgp_router_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('bgp_router_refs')
- self._original_bgp_router_refs = (self.get_bgp_router_refs() or [])[:]
- super(BgpRouter, self).del_bgp_router(*args, **kwargs)
- #end del_bgp_router
-
- def set_bgp_router_list(self, *args, **kwargs):
- """Set bgp-router list for bgp-router.
-
- :param ref_obj_list: list of BgpRouter object
- :param ref_data_list: list of BgpPeeringAttributes summary
-
- """
- self._pending_field_updates.add('bgp_router_refs')
- self._pending_ref_updates.discard('bgp_router_refs')
- super(BgpRouter, self).set_bgp_router_list(*args, **kwargs)
- #end set_bgp_router_list
-
-
- def get_global_system_config_back_refs(self):
- """Return list of all global-system-configs using this bgp-router"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.bgp_router_read(id = self.uuid, fields = ['global_system_config_back_refs'])
- back_refs = getattr(obj, 'global_system_config_back_refs', None)
- self.global_system_config_back_refs = back_refs
-
- return back_refs
- #end get_global_system_config_back_refs
-
- def get_physical_router_back_refs(self):
- """Return list of all physical-routers using this bgp-router"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.bgp_router_read(id = self.uuid, fields = ['physical_router_back_refs'])
- back_refs = getattr(obj, 'physical_router_back_refs', None)
- self.physical_router_back_refs = back_refs
-
- return back_refs
- #end get_physical_router_back_refs
-
- def get_virtual_router_back_refs(self):
- """Return list of all virtual-routers using this bgp-router"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.bgp_router_read(id = self.uuid, fields = ['virtual_router_back_refs'])
- back_refs = getattr(obj, 'virtual_router_back_refs', None)
- self.virtual_router_back_refs = back_refs
-
- return back_refs
- #end get_virtual_router_back_refs
-
- def get_bgp_router_back_refs(self):
- """Return list of all bgp-routers using this bgp-router"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.bgp_router_read(id = self.uuid, fields = ['bgp_router_back_refs'])
- back_refs = getattr(obj, 'bgp_router_back_refs', None)
- self.bgp_router_back_refs = back_refs
-
- return back_refs
- #end get_bgp_router_back_refs
-
-#end class BgpRouter
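
Editor's note: the BgpRouter wrapper above (like the other generated classes removed in this diff) records which properties and reference lists were touched in _pending_field_updates / _pending_ref_updates so that a later create/update can send only the changed data. A minimal local sketch of that bookkeeping follows; it is not part of the removed file, and the package-level names vnc_api.BgpRouterParams / vnc_api.BgpPeeringAttributes and their constructor arguments are assumptions based on the standard vnc_api library.

from vnc_api import vnc_api

# Purely local sketch: nothing here talks to an API server.
r1 = vnc_api.BgpRouter('bgp-router-1')
r2 = vnc_api.BgpRouter('bgp-router-2')
r1.set_bgp_router_parameters(
    vnc_api.BgpRouterParams(autonomous_system=64512))    # property change -> pending field update
r1.add_bgp_router(r2, vnc_api.BgpPeeringAttributes())    # reference change -> pending ref update
print(r1.get_pending_updates())   # includes 'bgp_router_parameters'
print(r1.get_ref_updates())       # includes 'bgp_router_refs'
r1.clear_pending_updates()        # typically reset after the server has been synced
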
-
-class VirtualRouter(vnc_api.gen.resource_common.VirtualRouter):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, virtual_router_type = None, virtual_router_ip_address = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if virtual_router_type:
- pending_fields.append('virtual_router_type')
- if virtual_router_ip_address:
- pending_fields.append('virtual_router_ip_address')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(VirtualRouter, self).__init__(name, parent_obj, virtual_router_type, virtual_router_ip_address, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'virtual_router_type' in kwargs:
- props_dict['virtual_router_type'] = kwargs['virtual_router_type']
- if 'virtual_router_ip_address' in kwargs:
- props_dict['virtual_router_ip_address'] = kwargs['virtual_router_ip_address']
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = VirtualRouter(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
- if 'bgp_router_refs' in kwargs:
- obj.bgp_router_refs = kwargs['bgp_router_refs']
- if 'virtual_machine_refs' in kwargs:
- obj.virtual_machine_refs = kwargs['virtual_machine_refs']
-
- # and back references but no obj api for it...
- if 'physical_router_back_refs' in kwargs:
- obj.physical_router_back_refs = kwargs['physical_router_back_refs']
- if 'provider_attachment_back_refs' in kwargs:
- obj.provider_attachment_back_refs = kwargs['provider_attachment_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.VirtualRouter.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.VirtualRouter.virtual_router_type.setter
- def virtual_router_type(self, virtual_router_type):
- """Set virtual-router-type for virtual-router.
-
- :param virtual_router_type: VirtualRouterType object
-
- """
- if 'virtual_router_type' not in self._pending_field_updates:
- self._pending_field_updates.add('virtual_router_type')
-
- self._virtual_router_type = virtual_router_type
- #end virtual_router_type
-
- def set_virtual_router_type(self, value):
- self.virtual_router_type = value
- #end set_virtual_router_type
-
- @vnc_api.gen.resource_common.VirtualRouter.virtual_router_ip_address.setter
- def virtual_router_ip_address(self, virtual_router_ip_address):
- """Set virtual-router-ip-address for virtual-router.
-
- :param virtual_router_ip_address: IpAddressType object
-
- """
- if 'virtual_router_ip_address' not in self._pending_field_updates:
- self._pending_field_updates.add('virtual_router_ip_address')
-
- self._virtual_router_ip_address = virtual_router_ip_address
- #end virtual_router_ip_address
-
- def set_virtual_router_ip_address(self, value):
- self.virtual_router_ip_address = value
- #end set_virtual_router_ip_address
-
- @vnc_api.gen.resource_common.VirtualRouter.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for virtual-router.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.VirtualRouter.display_name.setter
- def display_name(self, display_name):
- """Set display-name for virtual-router.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_bgp_router(self, *args, **kwargs):
- """Set bgp-router for virtual-router.
-
- :param ref_obj: BgpRouter object
-
- """
- self._pending_field_updates.add('bgp_router_refs')
- self._pending_ref_updates.discard('bgp_router_refs')
- super(VirtualRouter, self).set_bgp_router(*args, **kwargs)
-
- #end set_bgp_router
-
- def add_bgp_router(self, *args, **kwargs):
- """Add bgp-router to virtual-router.
-
- :param ref_obj: BgpRouter object
-
- """
- if 'bgp_router_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('bgp_router_refs')
- self._original_bgp_router_refs = (self.get_bgp_router_refs() or [])[:]
- super(VirtualRouter, self).add_bgp_router(*args, **kwargs)
- #end add_bgp_router
-
- def del_bgp_router(self, *args, **kwargs):
- if 'bgp_router_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('bgp_router_refs')
- self._original_bgp_router_refs = (self.get_bgp_router_refs() or [])[:]
- super(VirtualRouter, self).del_bgp_router(*args, **kwargs)
- #end del_bgp_router
-
- def set_bgp_router_list(self, *args, **kwargs):
- """Set bgp-router list for virtual-router.
-
- :param ref_obj_list: list of BgpRouter object
-
- """
- self._pending_field_updates.add('bgp_router_refs')
- self._pending_ref_updates.discard('bgp_router_refs')
- super(VirtualRouter, self).set_bgp_router_list(*args, **kwargs)
- #end set_bgp_router_list
-
- def set_virtual_machine(self, *args, **kwargs):
- """Set virtual-machine for virtual-router.
-
- :param ref_obj: VirtualMachine object
-
- """
- self._pending_field_updates.add('virtual_machine_refs')
- self._pending_ref_updates.discard('virtual_machine_refs')
- super(VirtualRouter, self).set_virtual_machine(*args, **kwargs)
-
- #end set_virtual_machine
-
- def add_virtual_machine(self, *args, **kwargs):
- """Add virtual-machine to virtual-router.
-
- :param ref_obj: VirtualMachine object
-
- """
- if 'virtual_machine_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_machine_refs')
- self._original_virtual_machine_refs = (self.get_virtual_machine_refs() or [])[:]
- super(VirtualRouter, self).add_virtual_machine(*args, **kwargs)
- #end add_virtual_machine
-
- def del_virtual_machine(self, *args, **kwargs):
- if 'virtual_machine_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_machine_refs')
- self._original_virtual_machine_refs = (self.get_virtual_machine_refs() or [])[:]
- super(VirtualRouter, self).del_virtual_machine(*args, **kwargs)
- #end del_virtual_machine
-
- def set_virtual_machine_list(self, *args, **kwargs):
- """Set virtual-machine list for virtual-router.
-
- :param ref_obj_list: list of VirtualMachine object
-
- """
- self._pending_field_updates.add('virtual_machine_refs')
- self._pending_ref_updates.discard('virtual_machine_refs')
- super(VirtualRouter, self).set_virtual_machine_list(*args, **kwargs)
- #end set_virtual_machine_list
-
-
- def get_physical_router_back_refs(self):
- """Return list of all physical-routers using this virtual-router"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_router_read(id = self.uuid, fields = ['physical_router_back_refs'])
- back_refs = getattr(obj, 'physical_router_back_refs', None)
- self.physical_router_back_refs = back_refs
-
- return back_refs
- #end get_physical_router_back_refs
-
- def get_provider_attachment_back_refs(self):
- """Return list of all provider-attachments using this virtual-router"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_router_read(id = self.uuid, fields = ['provider_attachment_back_refs'])
- back_refs = getattr(obj, 'provider_attachment_back_refs', None)
- self.provider_attachment_back_refs = back_refs
-
- return back_refs
- #end get_provider_attachment_back_refs
-
-#end class VirtualRouter
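
Editor's note: from_dict() above rebuilds a client-side object from the dictionary returned by the REST API, re-wrapping nested property dicts in their resource_xsd types. A brief sketch of that path; the payload below is a hand-written illustration, and the fq_name, uuid, and address values are placeholders rather than anything taken from a real deployment.

from vnc_api import vnc_api

payload = {
    'fq_name': ['default-global-system-config', 'compute-1'],
    'parent_type': 'global-system-config',
    'uuid': '00000000-0000-0000-0000-000000000001',
    'virtual_router_ip_address': '10.0.0.11',
}
vrouter = vnc_api.VirtualRouter.from_dict(**payload)
print(vrouter.uuid)                        # the placeholder uuid above
print(vrouter.virtual_router_ip_address)   # '10.0.0.11'
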
-
-class ConfigRoot(vnc_api.gen.resource_common.ConfigRoot):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name']
-
- self._server_conn = None
-
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(ConfigRoot, self).__init__(name, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = ConfigRoot(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
- if 'global_system_configs' in kwargs:
- obj.global_system_configs = kwargs['global_system_configs']
- if 'domains' in kwargs:
- obj.domains = kwargs['domains']
-
- # add any specified references...
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.ConfigRoot.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.ConfigRoot.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for config-root.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.ConfigRoot.display_name.setter
- def display_name(self, display_name):
- """Set display-name for config-root.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_global_system_configs(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(ConfigRoot, self).get_global_system_configs()
- if not children: # read it for first time
- obj = svr_conn.config_root_read(id = self.uuid, fields = ['global_system_configs'])
- children = getattr(obj, 'global_system_configs', None)
- self.global_system_configs = children
-
- return children
- #end get_global_system_configs
-
- def get_domains(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(ConfigRoot, self).get_domains()
- if not children: # read it for first time
- obj = svr_conn.config_root_read(id = self.uuid, fields = ['domains'])
- children = getattr(obj, 'domains', None)
- self.domains = children
-
- return children
- #end get_domains
-
-
-#end class ConfigRoot
-
-class Subnet(vnc_api.gen.resource_common.Subnet):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, subnet_ip_prefix = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name']
-
- self._server_conn = None
-
- if subnet_ip_prefix:
- pending_fields.append('subnet_ip_prefix')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(Subnet, self).__init__(name, subnet_ip_prefix, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'subnet_ip_prefix' in kwargs:
- props_dict['subnet_ip_prefix'] = vnc_api.gen.resource_xsd.SubnetType(**kwargs['subnet_ip_prefix'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = Subnet(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
- if 'virtual_machine_interface_refs' in kwargs:
- obj.virtual_machine_interface_refs = kwargs['virtual_machine_interface_refs']
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.Subnet.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.Subnet.subnet_ip_prefix.setter
- def subnet_ip_prefix(self, subnet_ip_prefix):
- """Set subnet-ip-prefix for subnet.
-
- :param subnet_ip_prefix: SubnetType object
-
- """
- if 'subnet_ip_prefix' not in self._pending_field_updates:
- self._pending_field_updates.add('subnet_ip_prefix')
-
- self._subnet_ip_prefix = subnet_ip_prefix
- #end subnet_ip_prefix
-
- def set_subnet_ip_prefix(self, value):
- self.subnet_ip_prefix = value
- #end set_subnet_ip_prefix
-
- @vnc_api.gen.resource_common.Subnet.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for subnet.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.Subnet.display_name.setter
- def display_name(self, display_name):
- """Set display-name for subnet.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_virtual_machine_interface(self, *args, **kwargs):
- """Set virtual-machine-interface for subnet.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(Subnet, self).set_virtual_machine_interface(*args, **kwargs)
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, *args, **kwargs):
- """Add virtual-machine-interface to subnet.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(Subnet, self).add_virtual_machine_interface(*args, **kwargs)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, *args, **kwargs):
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(Subnet, self).del_virtual_machine_interface(*args, **kwargs)
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, *args, **kwargs):
- """Set virtual-machine-interface list for subnet.
-
- :param ref_obj_list: list of VirtualMachineInterface object
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(Subnet, self).set_virtual_machine_interface_list(*args, **kwargs)
- #end set_virtual_machine_interface_list
-
-
-#end class Subnet
-
-class GlobalSystemConfig(vnc_api.gen.resource_common.GlobalSystemConfig):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, autonomous_system = None, config_version = None, plugin_tuning = None, ibgp_auto_mesh = None, ip_fabric_subnets = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if autonomous_system:
- pending_fields.append('autonomous_system')
- if config_version:
- pending_fields.append('config_version')
- if plugin_tuning:
- pending_fields.append('plugin_tuning')
- if ibgp_auto_mesh:
- pending_fields.append('ibgp_auto_mesh')
- if ip_fabric_subnets:
- pending_fields.append('ip_fabric_subnets')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(GlobalSystemConfig, self).__init__(name, parent_obj, autonomous_system, config_version, plugin_tuning, ibgp_auto_mesh, ip_fabric_subnets, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'autonomous_system' in kwargs:
- props_dict['autonomous_system'] = kwargs['autonomous_system']
- if 'config_version' in kwargs:
- props_dict['config_version'] = kwargs['config_version']
- if 'plugin_tuning' in kwargs:
- props_dict['plugin_tuning'] = vnc_api.gen.resource_xsd.PluginProperties(**kwargs['plugin_tuning'])
- if 'ibgp_auto_mesh' in kwargs:
- props_dict['ibgp_auto_mesh'] = kwargs['ibgp_auto_mesh']
- if 'ip_fabric_subnets' in kwargs:
- props_dict['ip_fabric_subnets'] = vnc_api.gen.resource_xsd.SubnetListType(**kwargs['ip_fabric_subnets'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = GlobalSystemConfig(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
- if 'global_vrouter_configs' in kwargs:
- obj.global_vrouter_configs = kwargs['global_vrouter_configs']
- if 'physical_routers' in kwargs:
- obj.physical_routers = kwargs['physical_routers']
- if 'virtual_routers' in kwargs:
- obj.virtual_routers = kwargs['virtual_routers']
- if 'config_nodes' in kwargs:
- obj.config_nodes = kwargs['config_nodes']
- if 'analytics_nodes' in kwargs:
- obj.analytics_nodes = kwargs['analytics_nodes']
- if 'database_nodes' in kwargs:
- obj.database_nodes = kwargs['database_nodes']
- if 'service_appliance_sets' in kwargs:
- obj.service_appliance_sets = kwargs['service_appliance_sets']
-
- # add any specified references...
- if 'bgp_router_refs' in kwargs:
- obj.bgp_router_refs = kwargs['bgp_router_refs']
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.GlobalSystemConfig.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.GlobalSystemConfig.autonomous_system.setter
- def autonomous_system(self, autonomous_system):
- """Set autonomous-system for global-system-config.
-
- :param autonomous_system: AutonomousSystemType object
-
- """
- if 'autonomous_system' not in self._pending_field_updates:
- self._pending_field_updates.add('autonomous_system')
-
- self._autonomous_system = autonomous_system
- #end autonomous_system
-
- def set_autonomous_system(self, value):
- self.autonomous_system = value
- #end set_autonomous_system
-
- @vnc_api.gen.resource_common.GlobalSystemConfig.config_version.setter
- def config_version(self, config_version):
- """Set config-version for global-system-config.
-
- :param config_version: xsd:string object
-
- """
- if 'config_version' not in self._pending_field_updates:
- self._pending_field_updates.add('config_version')
-
- self._config_version = config_version
- #end config_version
-
- def set_config_version(self, value):
- self.config_version = value
- #end set_config_version
-
- @vnc_api.gen.resource_common.GlobalSystemConfig.plugin_tuning.setter
- def plugin_tuning(self, plugin_tuning):
- """Set plugin-tuning for global-system-config.
-
- :param plugin_tuning: PluginProperties object
-
- """
- if 'plugin_tuning' not in self._pending_field_updates:
- self._pending_field_updates.add('plugin_tuning')
-
- self._plugin_tuning = plugin_tuning
- #end plugin_tuning
-
- def set_plugin_tuning(self, value):
- self.plugin_tuning = value
- #end set_plugin_tuning
-
- @vnc_api.gen.resource_common.GlobalSystemConfig.ibgp_auto_mesh.setter
- def ibgp_auto_mesh(self, ibgp_auto_mesh):
- """Set ibgp-auto-mesh for global-system-config.
-
- :param ibgp_auto_mesh: xsd:boolean object
-
- """
- if 'ibgp_auto_mesh' not in self._pending_field_updates:
- self._pending_field_updates.add('ibgp_auto_mesh')
-
- self._ibgp_auto_mesh = ibgp_auto_mesh
- #end ibgp_auto_mesh
-
- def set_ibgp_auto_mesh(self, value):
- self.ibgp_auto_mesh = value
- #end set_ibgp_auto_mesh
-
- @vnc_api.gen.resource_common.GlobalSystemConfig.ip_fabric_subnets.setter
- def ip_fabric_subnets(self, ip_fabric_subnets):
- """Set ip-fabric-subnets for global-system-config.
-
- :param ip_fabric_subnets: SubnetListType object
-
- """
- if 'ip_fabric_subnets' not in self._pending_field_updates:
- self._pending_field_updates.add('ip_fabric_subnets')
-
- self._ip_fabric_subnets = ip_fabric_subnets
- #end ip_fabric_subnets
-
- def set_ip_fabric_subnets(self, value):
- self.ip_fabric_subnets = value
- #end set_ip_fabric_subnets
-
- @vnc_api.gen.resource_common.GlobalSystemConfig.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for global-system-config.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.GlobalSystemConfig.display_name.setter
- def display_name(self, display_name):
- """Set display-name for global-system-config.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_bgp_router(self, *args, **kwargs):
- """Set bgp-router for global-system-config.
-
- :param ref_obj: BgpRouter object
-
- """
- self._pending_field_updates.add('bgp_router_refs')
- self._pending_ref_updates.discard('bgp_router_refs')
- super(GlobalSystemConfig, self).set_bgp_router(*args, **kwargs)
-
- #end set_bgp_router
-
- def add_bgp_router(self, *args, **kwargs):
- """Add bgp-router to global-system-config.
-
- :param ref_obj: BgpRouter object
-
- """
- if 'bgp_router_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('bgp_router_refs')
- self._original_bgp_router_refs = (self.get_bgp_router_refs() or [])[:]
- super(GlobalSystemConfig, self).add_bgp_router(*args, **kwargs)
- #end add_bgp_router
-
- def del_bgp_router(self, *args, **kwargs):
- if 'bgp_router_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('bgp_router_refs')
- self._original_bgp_router_refs = (self.get_bgp_router_refs() or [])[:]
- super(GlobalSystemConfig, self).del_bgp_router(*args, **kwargs)
- #end del_bgp_router
-
- def set_bgp_router_list(self, *args, **kwargs):
- """Set bgp-router list for global-system-config.
-
- :param ref_obj_list: list of BgpRouter object
-
- """
- self._pending_field_updates.add('bgp_router_refs')
- self._pending_ref_updates.discard('bgp_router_refs')
- super(GlobalSystemConfig, self).set_bgp_router_list(*args, **kwargs)
- #end set_bgp_router_list
-
- def get_global_vrouter_configs(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(GlobalSystemConfig, self).get_global_vrouter_configs()
- if not children: # read it for first time
- obj = svr_conn.global_system_config_read(id = self.uuid, fields = ['global_vrouter_configs'])
- children = getattr(obj, 'global_vrouter_configs', None)
- self.global_vrouter_configs = children
-
- return children
- #end get_global_vrouter_configs
-
- def get_physical_routers(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(GlobalSystemConfig, self).get_physical_routers()
- if not children: # read it for first time
- obj = svr_conn.global_system_config_read(id = self.uuid, fields = ['physical_routers'])
- children = getattr(obj, 'physical_routers', None)
- self.physical_routers = children
-
- return children
- #end get_physical_routers
-
- def get_virtual_routers(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(GlobalSystemConfig, self).get_virtual_routers()
- if not children: # read it for first time
- obj = svr_conn.global_system_config_read(id = self.uuid, fields = ['virtual_routers'])
- children = getattr(obj, 'virtual_routers', None)
- self.virtual_routers = children
-
- return children
- #end get_virtual_routers
-
- def get_config_nodes(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(GlobalSystemConfig, self).get_config_nodes()
- if not children: # read it for first time
- obj = svr_conn.global_system_config_read(id = self.uuid, fields = ['config_nodes'])
- children = getattr(obj, 'config_nodes', None)
- self.config_nodes = children
-
- return children
- #end get_config_nodes
-
- def get_analytics_nodes(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(GlobalSystemConfig, self).get_analytics_nodes()
- if not children: # read it for first time
- obj = svr_conn.global_system_config_read(id = self.uuid, fields = ['analytics_nodes'])
- children = getattr(obj, 'analytics_nodes', None)
- self.analytics_nodes = children
-
- return children
- #end get_analytics_nodes
-
- def get_database_nodes(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(GlobalSystemConfig, self).get_database_nodes()
- if not children: # read it for first time
- obj = svr_conn.global_system_config_read(id = self.uuid, fields = ['database_nodes'])
- children = getattr(obj, 'database_nodes', None)
- self.database_nodes = children
-
- return children
- #end get_database_nodes
-
- def get_service_appliance_sets(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(GlobalSystemConfig, self).get_service_appliance_sets()
- if not children: # read it for first time
- obj = svr_conn.global_system_config_read(id = self.uuid, fields = ['service_appliance_sets'])
- children = getattr(obj, 'service_appliance_sets', None)
- self.service_appliance_sets = children
-
- return children
- #end get_service_appliance_sets
-
-
-#end class GlobalSystemConfig
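
Editor's note: the child accessors above (get_physical_routers(), get_virtual_routers(), and so on) fetch the child list lazily through the stored server connection on first use and cache it on the object. A short sketch of that pattern, assuming the standard VncApi client, that an object returned by a read already has its server connection attached, and the usual child-summary dict format with 'to' (fq_name) and 'uuid' keys.

from vnc_api import vnc_api

client = vnc_api.VncApi()   # assumes default connection settings
gsc = client.global_system_config_read(fq_name=['default-global-system-config'])
for child in (gsc.get_physical_routers() or []):
    # Assumed child summary format: {'to': [...fq_name...], 'uuid': '...', ...}
    print(child['to'], child['uuid'])
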
-
-class ServiceAppliance(vnc_api.gen.resource_common.ServiceAppliance):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, service_appliance_user_credentials = None, service_appliance_ip_address = None, service_appliance_properties = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if service_appliance_user_credentials:
- pending_fields.append('service_appliance_user_credentials')
- if service_appliance_ip_address:
- pending_fields.append('service_appliance_ip_address')
- if service_appliance_properties:
- pending_fields.append('service_appliance_properties')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(ServiceAppliance, self).__init__(name, parent_obj, service_appliance_user_credentials, service_appliance_ip_address, service_appliance_properties, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'service_appliance_user_credentials' in kwargs:
- props_dict['service_appliance_user_credentials'] = vnc_api.gen.resource_xsd.UserCredentials(**kwargs['service_appliance_user_credentials'])
- if 'service_appliance_ip_address' in kwargs:
- props_dict['service_appliance_ip_address'] = kwargs['service_appliance_ip_address']
- if 'service_appliance_properties' in kwargs:
- props_dict['service_appliance_properties'] = vnc_api.gen.resource_xsd.KeyValuePairs(**kwargs['service_appliance_properties'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = ServiceAppliance(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.ServiceAppliance.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.ServiceAppliance.service_appliance_user_credentials.setter
- def service_appliance_user_credentials(self, service_appliance_user_credentials):
- """Set service-appliance-user-credentials for service-appliance.
-
- :param service_appliance_user_credentials: UserCredentials object
-
- """
- if 'service_appliance_user_credentials' not in self._pending_field_updates:
- self._pending_field_updates.add('service_appliance_user_credentials')
-
- self._service_appliance_user_credentials = service_appliance_user_credentials
- #end service_appliance_user_credentials
-
- def set_service_appliance_user_credentials(self, value):
- self.service_appliance_user_credentials = value
- #end set_service_appliance_user_credentials
-
- @vnc_api.gen.resource_common.ServiceAppliance.service_appliance_ip_address.setter
- def service_appliance_ip_address(self, service_appliance_ip_address):
- """Set service-appliance-ip-address for service-appliance.
-
- :param service_appliance_ip_address: IpAddressType object
-
- """
- if 'service_appliance_ip_address' not in self._pending_field_updates:
- self._pending_field_updates.add('service_appliance_ip_address')
-
- self._service_appliance_ip_address = service_appliance_ip_address
- #end service_appliance_ip_address
-
- def set_service_appliance_ip_address(self, value):
- self.service_appliance_ip_address = value
- #end set_service_appliance_ip_address
-
- @vnc_api.gen.resource_common.ServiceAppliance.service_appliance_properties.setter
- def service_appliance_properties(self, service_appliance_properties):
- """Set service-appliance-properties for service-appliance.
-
- :param service_appliance_properties: KeyValuePairs object
-
- """
- if 'service_appliance_properties' not in self._pending_field_updates:
- self._pending_field_updates.add('service_appliance_properties')
-
- self._service_appliance_properties = service_appliance_properties
- #end service_appliance_properties
-
- def set_service_appliance_properties(self, value):
- self.service_appliance_properties = value
- #end set_service_appliance_properties
-
- @vnc_api.gen.resource_common.ServiceAppliance.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for service-appliance.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.ServiceAppliance.display_name.setter
- def display_name(self, display_name):
- """Set display-name for service-appliance.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
-
-#end class ServiceAppliance
-
-class ServiceInstance(vnc_api.gen.resource_common.ServiceInstance):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, service_instance_properties = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if service_instance_properties:
- pending_fields.append('service_instance_properties')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(ServiceInstance, self).__init__(name, parent_obj, service_instance_properties, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'service_instance_properties' in kwargs:
- props_dict['service_instance_properties'] = vnc_api.gen.resource_xsd.ServiceInstanceType(**kwargs['service_instance_properties'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = ServiceInstance(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
- if 'service_template_refs' in kwargs:
- obj.service_template_refs = kwargs['service_template_refs']
-
- # and back references but no obj api for it...
- if 'virtual_machine_back_refs' in kwargs:
- obj.virtual_machine_back_refs = kwargs['virtual_machine_back_refs']
- if 'logical_router_back_refs' in kwargs:
- obj.logical_router_back_refs = kwargs['logical_router_back_refs']
- if 'loadbalancer_pool_back_refs' in kwargs:
- obj.loadbalancer_pool_back_refs = kwargs['loadbalancer_pool_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.ServiceInstance.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.ServiceInstance.service_instance_properties.setter
- def service_instance_properties(self, service_instance_properties):
- """Set service-instance-properties for service-instance.
-
- :param service_instance_properties: ServiceInstanceType object
-
- """
- if 'service_instance_properties' not in self._pending_field_updates:
- self._pending_field_updates.add('service_instance_properties')
-
- self._service_instance_properties = service_instance_properties
- #end service_instance_properties
-
- def set_service_instance_properties(self, value):
- self.service_instance_properties = value
- #end set_service_instance_properties
-
- @vnc_api.gen.resource_common.ServiceInstance.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for service-instance.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.ServiceInstance.display_name.setter
- def display_name(self, display_name):
- """Set display-name for service-instance.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_service_template(self, *args, **kwargs):
- """Set service-template for service-instance.
-
- :param ref_obj: ServiceTemplate object
-
- """
- self._pending_field_updates.add('service_template_refs')
- self._pending_ref_updates.discard('service_template_refs')
- super(ServiceInstance, self).set_service_template(*args, **kwargs)
-
- #end set_service_template
-
- def add_service_template(self, *args, **kwargs):
- """Add service-template to service-instance.
-
- :param ref_obj: ServiceTemplate object
-
- """
- if 'service_template_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('service_template_refs')
- self._original_service_template_refs = (self.get_service_template_refs() or [])[:]
- super(ServiceInstance, self).add_service_template(*args, **kwargs)
- #end add_service_template
-
- def del_service_template(self, *args, **kwargs):
- if 'service_template_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('service_template_refs')
- self._original_service_template_refs = (self.get_service_template_refs() or [])[:]
- super(ServiceInstance, self).del_service_template(*args, **kwargs)
- #end del_service_template
-
- def set_service_template_list(self, *args, **kwargs):
- """Set service-template list for service-instance.
-
- :param ref_obj_list: list of ServiceTemplate object
-
- """
- self._pending_field_updates.add('service_template_refs')
- self._pending_ref_updates.discard('service_template_refs')
- super(ServiceInstance, self).set_service_template_list(*args, **kwargs)
- #end set_service_template_list
-
-
- def get_virtual_machine_back_refs(self):
- """Return list of all virtual-machines using this service-instance"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.service_instance_read(id = self.uuid, fields = ['virtual_machine_back_refs'])
- back_refs = getattr(obj, 'virtual_machine_back_refs', None)
- self.virtual_machine_back_refs = back_refs
-
- return back_refs
- #end get_virtual_machine_back_refs
-
- def get_logical_router_back_refs(self):
- """Return list of all logical-routers using this service-instance"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.service_instance_read(id = self.uuid, fields = ['logical_router_back_refs'])
- back_refs = getattr(obj, 'logical_router_back_refs', None)
- self.logical_router_back_refs = back_refs
-
- return back_refs
- #end get_logical_router_back_refs
-
- def get_loadbalancer_pool_back_refs(self):
- """Return list of all loadbalancer-pools using this service-instance"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.service_instance_read(id = self.uuid, fields = ['loadbalancer_pool_back_refs'])
- back_refs = getattr(obj, 'loadbalancer_pool_back_refs', None)
- self.loadbalancer_pool_back_refs = back_refs
-
- return back_refs
- #end get_loadbalancer_pool_back_refs
-
-#end class ServiceInstance
-
-class Namespace(vnc_api.gen.resource_common.Namespace):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, namespace_cidr = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if namespace_cidr:
- pending_fields.append('namespace_cidr')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(Namespace, self).__init__(name, parent_obj, namespace_cidr, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'namespace_cidr' in kwargs:
- props_dict['namespace_cidr'] = vnc_api.gen.resource_xsd.SubnetType(**kwargs['namespace_cidr'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = Namespace(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
-
- # and back references but no obj api for it...
- if 'project_back_refs' in kwargs:
- obj.project_back_refs = kwargs['project_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.Namespace.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.Namespace.namespace_cidr.setter
- def namespace_cidr(self, namespace_cidr):
- """Set namespace-cidr for namespace.
-
- :param namespace_cidr: SubnetType object
-
- """
- if 'namespace_cidr' not in self._pending_field_updates:
- self._pending_field_updates.add('namespace_cidr')
-
- self._namespace_cidr = namespace_cidr
- #end namespace_cidr
-
- def set_namespace_cidr(self, value):
- self.namespace_cidr = value
- #end set_namespace_cidr
-
- @vnc_api.gen.resource_common.Namespace.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for namespace.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.Namespace.display_name.setter
- def display_name(self, display_name):
- """Set display-name for namespace.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
-
- def get_project_back_refs(self):
- """Return list of all projects using this namespace"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.namespace_read(id = self.uuid, fields = ['project_back_refs'])
- back_refs = getattr(obj, 'project_back_refs', None)
- self.project_back_refs = back_refs
-
- return back_refs
- #end get_project_back_refs
-
-#end class Namespace
-
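from_dict() above rebuilds a client-side Namespace from a plain dict (for example a decoded REST response): fq_name and uuid are mandatory, everything else is optional, and every field it touches ends up in the pending-update set. A small sketch with purely illustrative values (the 'domain' parent type is an assumption):

    ns = Namespace.from_dict(
        fq_name=['default-domain', 'demo-ns'],            # required
        uuid='11111111-2222-3333-4444-555555555555',      # required; illustrative value
        parent_type='domain',                             # assumed parent type
        display_name='demo-ns',
    )
    print(sorted(ns.get_pending_updates()))
    # ['display_name', 'fq_name', 'parent_type', 'uuid']
    ns.clear_pending_updates()     # reset the bookkeeping once the object has been pushed
    print(ns.get_pending_updates())  # set()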
-class LogicalInterface(vnc_api.gen.resource_common.LogicalInterface):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, logical_interface_vlan_tag = None, logical_interface_type = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if logical_interface_vlan_tag:
- pending_fields.append('logical_interface_vlan_tag')
- if logical_interface_type:
- pending_fields.append('logical_interface_type')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(LogicalInterface, self).__init__(name, parent_obj, logical_interface_vlan_tag, logical_interface_type, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'logical_interface_vlan_tag' in kwargs:
- props_dict['logical_interface_vlan_tag'] = kwargs['logical_interface_vlan_tag']
- if 'logical_interface_type' in kwargs:
- props_dict['logical_interface_type'] = kwargs['logical_interface_type']
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = LogicalInterface(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
- if 'virtual_machine_interface_refs' in kwargs:
- obj.virtual_machine_interface_refs = kwargs['virtual_machine_interface_refs']
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.LogicalInterface.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.LogicalInterface.logical_interface_vlan_tag.setter
- def logical_interface_vlan_tag(self, logical_interface_vlan_tag):
- """Set logical-interface-vlan-tag for logical-interface.
-
- :param logical_interface_vlan_tag: xsd:integer object
-
- """
- if 'logical_interface_vlan_tag' not in self._pending_field_updates:
- self._pending_field_updates.add('logical_interface_vlan_tag')
-
- self._logical_interface_vlan_tag = logical_interface_vlan_tag
- #end logical_interface_vlan_tag
-
- def set_logical_interface_vlan_tag(self, value):
- self.logical_interface_vlan_tag = value
- #end set_logical_interface_vlan_tag
-
- @vnc_api.gen.resource_common.LogicalInterface.logical_interface_type.setter
- def logical_interface_type(self, logical_interface_type):
- """Set logical-interface-type for logical-interface.
-
- :param logical_interface_type: LogicalInterfaceType object
-
- """
- if 'logical_interface_type' not in self._pending_field_updates:
- self._pending_field_updates.add('logical_interface_type')
-
- self._logical_interface_type = logical_interface_type
- #end logical_interface_type
-
- def set_logical_interface_type(self, value):
- self.logical_interface_type = value
- #end set_logical_interface_type
-
- @vnc_api.gen.resource_common.LogicalInterface.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for logical-interface.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.LogicalInterface.display_name.setter
- def display_name(self, display_name):
- """Set display-name for logical-interface.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_virtual_machine_interface(self, *args, **kwargs):
- """Set virtual-machine-interface for logical-interface.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(LogicalInterface, self).set_virtual_machine_interface(*args, **kwargs)
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, *args, **kwargs):
- """Add virtual-machine-interface to logical-interface.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(LogicalInterface, self).add_virtual_machine_interface(*args, **kwargs)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, *args, **kwargs):
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(LogicalInterface, self).del_virtual_machine_interface(*args, **kwargs)
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, *args, **kwargs):
- """Set virtual-machine-interface list for logical-interface.
-
- :param ref_obj_list: list of VirtualMachineInterface object
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(LogicalInterface, self).set_virtual_machine_interface_list(*args, **kwargs)
- #end set_virtual_machine_interface_list
-
-
-#end class LogicalInterface
-
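The reference helpers above distinguish wholesale replacement from incremental edits: set_virtual_machine_interface() and set_virtual_machine_interface_list() queue the refs as a pending field update, while add_/del_ first snapshot the current refs and queue a pending ref update so the change can later be applied as a diff. An illustrative sketch (VirtualMachineInterface is assumed to be the wrapper defined earlier in this module):

    lif = LogicalInterface('ge-0/0/0.100')
    vmi = VirtualMachineInterface('demo-vmi')        # assumed: defined earlier in this file

    lif.add_virtual_machine_interface(vmi)           # snapshot, then incremental edit
    print(lif.get_ref_updates())                     # {'virtual_machine_interface_refs'}

    lif.set_virtual_machine_interface_list([vmi])    # wholesale replacement
    print(lif.get_ref_updates())                     # set() - moved to the field updates
    print('virtual_machine_interface_refs' in lif.get_pending_updates())  # True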
-class RouteTable(vnc_api.gen.resource_common.RouteTable):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, routes = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if routes:
- pending_fields.append('routes')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(RouteTable, self).__init__(name, parent_obj, routes, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'routes' in kwargs:
- props_dict['routes'] = vnc_api.gen.resource_xsd.RouteTableType(**kwargs['routes'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = RouteTable(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
-
- # and back references but no obj api for it...
- if 'virtual_network_back_refs' in kwargs:
- obj.virtual_network_back_refs = kwargs['virtual_network_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.RouteTable.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.RouteTable.routes.setter
- def routes(self, routes):
- """Set routes for route-table.
-
- :param routes: RouteTableType object
-
- """
- if 'routes' not in self._pending_field_updates:
- self._pending_field_updates.add('routes')
-
- self._routes = routes
- #end routes
-
- def set_routes(self, value):
- self.routes = value
- #end set_routes
-
- @vnc_api.gen.resource_common.RouteTable.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for route-table.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.RouteTable.display_name.setter
- def display_name(self, display_name):
- """Set display-name for route-table.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
-
- def get_virtual_network_back_refs(self):
- """Return list of all virtual-networks using this route-table"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.route_table_read(id = self.uuid, fields = ['virtual_network_back_refs'])
- back_refs = getattr(obj, 'virtual_network_back_refs', None)
- self.virtual_network_back_refs = back_refs
-
- return back_refs
- #end get_virtual_network_back_refs
-
-#end class RouteTable
-
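As with the other wrappers, assigning a property directly and calling its set_*() helper are interchangeable; both simply queue the field for the next update. For example:

    rt = RouteTable('demo-rt')
    rt.display_name = 'Demo route table'        # property assignment queues the field...
    rt.set_display_name('Demo route table')     # ...and the helper is just an alias for it
    print(sorted(rt.get_pending_updates()))     # ['display_name', 'fq_name', 'parent_type']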
-class PhysicalInterface(vnc_api.gen.resource_common.PhysicalInterface):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(PhysicalInterface, self).__init__(name, parent_obj, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = PhysicalInterface(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
- if 'logical_interfaces' in kwargs:
- obj.logical_interfaces = kwargs['logical_interfaces']
-
- # add any specified references...
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.PhysicalInterface.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.PhysicalInterface.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for physical-interface.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.PhysicalInterface.display_name.setter
- def display_name(self, display_name):
- """Set display-name for physical-interface.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_logical_interfaces(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(PhysicalInterface, self).get_logical_interfaces()
- if not children: # read it for first time
- obj = svr_conn.physical_interface_read(id = self.uuid, fields = ['logical_interfaces'])
- children = getattr(obj, 'logical_interfaces', None)
- self.logical_interfaces = children
-
- return children
- #end get_logical_interfaces
-
-
-#end class PhysicalInterface
-
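get_logical_interfaces() above follows the lazy-child pattern: nothing is fetched until a server connection and a UUID are in place, the first successful call reads the child summary via physical_interface_read(), and later calls reuse the cached list. A sketch, again treating the connection handle and UUID as caller-supplied assumptions:

    def show_logical_interfaces(pif_uuid, conn):
        # pif_uuid: UUID of an existing physical-interface (assumed)
        # conn:     an already-authenticated API client handle (assumed)
        pif = PhysicalInterface('ge-0/0/0')
        print(pif.get_logical_interfaces())       # None - no server connection attached
        pif.set_uuid(pif_uuid)
        pif.set_server_conn(conn)
        children = pif.get_logical_interfaces()   # first call: reads and caches the summary
        return children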
-class AccessControlList(vnc_api.gen.resource_common.AccessControlList):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, access_control_list_entries = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if access_control_list_entries:
- pending_fields.append('access_control_list_entries')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(AccessControlList, self).__init__(name, parent_obj, access_control_list_entries, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'access_control_list_entries' in kwargs:
- props_dict['access_control_list_entries'] = vnc_api.gen.resource_xsd.AclEntriesType(**kwargs['access_control_list_entries'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = AccessControlList(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.AccessControlList.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.AccessControlList.access_control_list_entries.setter
- def access_control_list_entries(self, access_control_list_entries):
- """Set access-control-list-entries for access-control-list.
-
- :param access_control_list_entries: AclEntriesType object
-
- """
- if 'access_control_list_entries' not in self._pending_field_updates:
- self._pending_field_updates.add('access_control_list_entries')
-
- self._access_control_list_entries = access_control_list_entries
- #end access_control_list_entries
-
- def set_access_control_list_entries(self, value):
- self.access_control_list_entries = value
- #end set_access_control_list_entries
-
- @vnc_api.gen.resource_common.AccessControlList.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for access-control-list.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.AccessControlList.display_name.setter
- def display_name(self, display_name):
- """Set display-name for access-control-list.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
-
-#end class AccessControlList
-
-class AnalyticsNode(vnc_api.gen.resource_common.AnalyticsNode):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, analytics_node_ip_address = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if analytics_node_ip_address:
- pending_fields.append('analytics_node_ip_address')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(AnalyticsNode, self).__init__(name, parent_obj, analytics_node_ip_address, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'analytics_node_ip_address' in kwargs:
- props_dict['analytics_node_ip_address'] = kwargs['analytics_node_ip_address']
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = AnalyticsNode(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.AnalyticsNode.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.AnalyticsNode.analytics_node_ip_address.setter
- def analytics_node_ip_address(self, analytics_node_ip_address):
- """Set analytics-node-ip-address for analytics-node.
-
- :param analytics_node_ip_address: IpAddressType object
-
- """
- if 'analytics_node_ip_address' not in self._pending_field_updates:
- self._pending_field_updates.add('analytics_node_ip_address')
-
- self._analytics_node_ip_address = analytics_node_ip_address
- #end analytics_node_ip_address
-
- def set_analytics_node_ip_address(self, value):
- self.analytics_node_ip_address = value
- #end set_analytics_node_ip_address
-
- @vnc_api.gen.resource_common.AnalyticsNode.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for analytics-node.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.AnalyticsNode.display_name.setter
- def display_name(self, display_name):
- """Set display-name for analytics-node.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
-
-#end class AnalyticsNode
-
-class VirtualDns(vnc_api.gen.resource_common.VirtualDns):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, virtual_DNS_data = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if virtual_DNS_data:
- pending_fields.append('virtual_DNS_data')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(VirtualDns, self).__init__(name, parent_obj, virtual_DNS_data, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'virtual_DNS_data' in kwargs:
- props_dict['virtual_DNS_data'] = vnc_api.gen.resource_xsd.VirtualDnsType(**kwargs['virtual_DNS_data'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = VirtualDns(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
- if 'virtual_DNS_records' in kwargs:
- obj.virtual_DNS_records = kwargs['virtual_DNS_records']
-
- # add any specified references...
-
- # and back references but no obj api for it...
- if 'network_ipam_back_refs' in kwargs:
- obj.network_ipam_back_refs = kwargs['network_ipam_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.VirtualDns.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.VirtualDns.virtual_DNS_data.setter
- def virtual_DNS_data(self, virtual_DNS_data):
- """Set virtual-DNS-data for virtual-DNS.
-
- :param virtual_DNS_data: VirtualDnsType object
-
- """
- if 'virtual_DNS_data' not in self._pending_field_updates:
- self._pending_field_updates.add('virtual_DNS_data')
-
- self._virtual_DNS_data = virtual_DNS_data
- #end virtual_DNS_data
-
- def set_virtual_DNS_data(self, value):
- self.virtual_DNS_data = value
- #end set_virtual_DNS_data
-
- @vnc_api.gen.resource_common.VirtualDns.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for virtual-DNS.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.VirtualDns.display_name.setter
- def display_name(self, display_name):
- """Set display-name for virtual-DNS.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_virtual_DNS_records(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(VirtualDns, self).get_virtual_DNS_records()
- if not children: # read it for first time
- obj = svr_conn.virtual_DNS_read(id = self.uuid, fields = ['virtual_DNS_records'])
- children = getattr(obj, 'virtual_DNS_records', None)
- self.virtual_DNS_records = children
-
- return children
- #end get_virtual_DNS_records
-
-
- def get_network_ipam_back_refs(self):
- """Return list of all network-ipams using this virtual-DNS"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_DNS_read(id = self.uuid, fields = ['network_ipam_back_refs'])
- back_refs = getattr(obj, 'network_ipam_back_refs', None)
- self.network_ipam_back_refs = back_refs
-
- return back_refs
- #end get_network_ipam_back_refs
-
-#end class VirtualDns
-
-class CustomerAttachment(vnc_api.gen.resource_common.CustomerAttachment):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, attachment_address = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name']
-
- self._server_conn = None
-
- if attachment_address:
- pending_fields.append('attachment_address')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(CustomerAttachment, self).__init__(name, attachment_address, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'attachment_address' in kwargs:
- props_dict['attachment_address'] = vnc_api.gen.resource_xsd.AttachmentAddressType(**kwargs['attachment_address'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = CustomerAttachment(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
- if 'virtual_machine_interface_refs' in kwargs:
- obj.virtual_machine_interface_refs = kwargs['virtual_machine_interface_refs']
- if 'floating_ip_refs' in kwargs:
- obj.floating_ip_refs = kwargs['floating_ip_refs']
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.CustomerAttachment.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.CustomerAttachment.attachment_address.setter
- def attachment_address(self, attachment_address):
- """Set attachment-address for customer-attachment.
-
- :param attachment_address: AttachmentAddressType object
-
- """
- if 'attachment_address' not in self._pending_field_updates:
- self._pending_field_updates.add('attachment_address')
-
- self._attachment_address = attachment_address
- #end attachment_address
-
- def set_attachment_address(self, value):
- self.attachment_address = value
- #end set_attachment_address
-
- @vnc_api.gen.resource_common.CustomerAttachment.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for customer-attachment.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.CustomerAttachment.display_name.setter
- def display_name(self, display_name):
- """Set display-name for customer-attachment.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_virtual_machine_interface(self, *args, **kwargs):
- """Set virtual-machine-interface for customer-attachment.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(CustomerAttachment, self).set_virtual_machine_interface(*args, **kwargs)
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, *args, **kwargs):
- """Add virtual-machine-interface to customer-attachment.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(CustomerAttachment, self).add_virtual_machine_interface(*args, **kwargs)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, *args, **kwargs):
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(CustomerAttachment, self).del_virtual_machine_interface(*args, **kwargs)
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, *args, **kwargs):
- """Set virtual-machine-interface list for customer-attachment.
-
- :param ref_obj_list: list of VirtualMachineInterface object
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(CustomerAttachment, self).set_virtual_machine_interface_list(*args, **kwargs)
- #end set_virtual_machine_interface_list
-
- def set_floating_ip(self, *args, **kwargs):
- """Set floating-ip for customer-attachment.
-
- :param ref_obj: FloatingIp object
-
- """
- self._pending_field_updates.add('floating_ip_refs')
- self._pending_ref_updates.discard('floating_ip_refs')
- super(CustomerAttachment, self).set_floating_ip(*args, **kwargs)
-
- #end set_floating_ip
-
- def add_floating_ip(self, *args, **kwargs):
- """Add floating-ip to customer-attachment.
-
- :param ref_obj: FloatingIp object
-
- """
- if 'floating_ip_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('floating_ip_refs')
- self._original_floating_ip_refs = (self.get_floating_ip_refs() or [])[:]
- super(CustomerAttachment, self).add_floating_ip(*args, **kwargs)
- #end add_floating_ip
-
- def del_floating_ip(self, *args, **kwargs):
- if 'floating_ip_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('floating_ip_refs')
- self._original_floating_ip_refs = (self.get_floating_ip_refs() or [])[:]
- super(CustomerAttachment, self).del_floating_ip(*args, **kwargs)
- #end del_floating_ip
-
- def set_floating_ip_list(self, *args, **kwargs):
- """Set floating-ip list for customer-attachment.
-
- :param ref_obj_list: list of FloatingIp object
-
- """
- self._pending_field_updates.add('floating_ip_refs')
- self._pending_ref_updates.discard('floating_ip_refs')
- super(CustomerAttachment, self).set_floating_ip_list(*args, **kwargs)
- #end set_floating_ip_list
-
-
-#end class CustomerAttachment
-
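The floating-ip helpers above work the same way as the virtual-machine-interface ones: the first incremental edit snapshots _original_floating_ip_refs so the eventual update can be expressed as a diff, and further add/del calls reuse that pending entry. For instance (FloatingIp is assumed to be the wrapper defined earlier in this module):

    ca = CustomerAttachment('demo-attachment')
    fip = FloatingIp('demo-fip')          # assumed: defined earlier in this file

    ca.add_floating_ip(fip)               # snapshots the original refs, then appends
    ca.del_floating_ip(fip)               # reuses the same pending entry for the removal
    print(ca.get_ref_updates())           # {'floating_ip_refs'}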
-class ServiceApplianceSet(vnc_api.gen.resource_common.ServiceApplianceSet):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, service_appliance_set_properties = None, service_appliance_driver = None, service_appliance_ha_mode = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if service_appliance_set_properties:
- pending_fields.append('service_appliance_set_properties')
- if service_appliance_driver:
- pending_fields.append('service_appliance_driver')
- if service_appliance_ha_mode:
- pending_fields.append('service_appliance_ha_mode')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(ServiceApplianceSet, self).__init__(name, parent_obj, service_appliance_set_properties, service_appliance_driver, service_appliance_ha_mode, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'service_appliance_set_properties' in kwargs:
- props_dict['service_appliance_set_properties'] = vnc_api.gen.resource_xsd.KeyValuePairs(**kwargs['service_appliance_set_properties'])
- if 'service_appliance_driver' in kwargs:
- props_dict['service_appliance_driver'] = kwargs['service_appliance_driver']
- if 'service_appliance_ha_mode' in kwargs:
- props_dict['service_appliance_ha_mode'] = kwargs['service_appliance_ha_mode']
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = ServiceApplianceSet(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
- if 'service_appliances' in kwargs:
- obj.service_appliances = kwargs['service_appliances']
-
- # add any specified references...
-
- # and back references but no obj api for it...
- if 'loadbalancer_pool_back_refs' in kwargs:
- obj.loadbalancer_pool_back_refs = kwargs['loadbalancer_pool_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.ServiceApplianceSet.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.ServiceApplianceSet.service_appliance_set_properties.setter
- def service_appliance_set_properties(self, service_appliance_set_properties):
- """Set service-appliance-set-properties for service-appliance-set.
-
- :param service_appliance_set_properties: KeyValuePairs object
-
- """
- if 'service_appliance_set_properties' not in self._pending_field_updates:
- self._pending_field_updates.add('service_appliance_set_properties')
-
- self._service_appliance_set_properties = service_appliance_set_properties
- #end service_appliance_set_properties
-
- def set_service_appliance_set_properties(self, value):
- self.service_appliance_set_properties = value
- #end set_service_appliance_set_properties
-
- @vnc_api.gen.resource_common.ServiceApplianceSet.service_appliance_driver.setter
- def service_appliance_driver(self, service_appliance_driver):
- """Set service-appliance-driver for service-appliance-set.
-
- :param service_appliance_driver: xsd:string object
-
- """
- if 'service_appliance_driver' not in self._pending_field_updates:
- self._pending_field_updates.add('service_appliance_driver')
-
- self._service_appliance_driver = service_appliance_driver
- #end service_appliance_driver
-
- def set_service_appliance_driver(self, value):
- self.service_appliance_driver = value
- #end set_service_appliance_driver
-
- @vnc_api.gen.resource_common.ServiceApplianceSet.service_appliance_ha_mode.setter
- def service_appliance_ha_mode(self, service_appliance_ha_mode):
- """Set service-appliance-ha-mode for service-appliance-set.
-
- :param service_appliance_ha_mode: xsd:string object
-
- """
- if 'service_appliance_ha_mode' not in self._pending_field_updates:
- self._pending_field_updates.add('service_appliance_ha_mode')
-
- self._service_appliance_ha_mode = service_appliance_ha_mode
- #end service_appliance_ha_mode
-
- def set_service_appliance_ha_mode(self, value):
- self.service_appliance_ha_mode = value
- #end set_service_appliance_ha_mode
-
- @vnc_api.gen.resource_common.ServiceApplianceSet.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for service-appliance-set.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.ServiceApplianceSet.display_name.setter
- def display_name(self, display_name):
- """Set display-name for service-appliance-set.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_service_appliances(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(ServiceApplianceSet, self).get_service_appliances()
- if not children: # read it for first time
- obj = svr_conn.service_appliance_set_read(id = self.uuid, fields = ['service_appliances'])
- children = getattr(obj, 'service_appliances', None)
- self.service_appliances = children
-
- return children
- #end get_service_appliances
-
-
- def get_loadbalancer_pool_back_refs(self):
- """Return list of all loadbalancer-pools using this service-appliance-set"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.service_appliance_set_read(id = self.uuid, fields = ['loadbalancer_pool_back_refs'])
- back_refs = getattr(obj, 'loadbalancer_pool_back_refs', None)
- self.loadbalancer_pool_back_refs = back_refs
-
- return back_refs
- #end get_loadbalancer_pool_back_refs
-
-#end class ServiceApplianceSet
-
-class ConfigNode(vnc_api.gen.resource_common.ConfigNode):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, config_node_ip_address = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if config_node_ip_address:
- pending_fields.append('config_node_ip_address')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(ConfigNode, self).__init__(name, parent_obj, config_node_ip_address, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'config_node_ip_address' in kwargs:
- props_dict['config_node_ip_address'] = kwargs['config_node_ip_address']
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = ConfigNode(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.ConfigNode.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.ConfigNode.config_node_ip_address.setter
- def config_node_ip_address(self, config_node_ip_address):
- """Set config-node-ip-address for config-node.
-
- :param config_node_ip_address: IpAddressType object
-
- """
- if 'config_node_ip_address' not in self._pending_field_updates:
- self._pending_field_updates.add('config_node_ip_address')
-
- self._config_node_ip_address = config_node_ip_address
- #end config_node_ip_address
-
- def set_config_node_ip_address(self, value):
- self.config_node_ip_address = value
- #end set_config_node_ip_address
-
- @vnc_api.gen.resource_common.ConfigNode.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for config-node.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.ConfigNode.display_name.setter
- def display_name(self, display_name):
- """Set display-name for config-node.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
-
-#end class ConfigNode
-
-class QosQueue(vnc_api.gen.resource_common.QosQueue):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, min_bandwidth = None, max_bandwidth = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if min_bandwidth:
- pending_fields.append('min_bandwidth')
- if max_bandwidth:
- pending_fields.append('max_bandwidth')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(QosQueue, self).__init__(name, parent_obj, min_bandwidth, max_bandwidth, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'min_bandwidth' in kwargs:
- props_dict['min_bandwidth'] = kwargs['min_bandwidth']
- if 'max_bandwidth' in kwargs:
- props_dict['max_bandwidth'] = kwargs['max_bandwidth']
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = QosQueue(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
-
- # and back references but no obj api for it...
- if 'qos_forwarding_class_back_refs' in kwargs:
- obj.qos_forwarding_class_back_refs = kwargs['qos_forwarding_class_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.QosQueue.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.QosQueue.min_bandwidth.setter
- def min_bandwidth(self, min_bandwidth):
- """Set min-bandwidth for qos-queue.
-
- :param min_bandwidth: xsd:integer object
-
- """
- if 'min_bandwidth' not in self._pending_field_updates:
- self._pending_field_updates.add('min_bandwidth')
-
- self._min_bandwidth = min_bandwidth
- #end min_bandwidth
-
- def set_min_bandwidth(self, value):
- self.min_bandwidth = value
- #end set_min_bandwidth
-
- @vnc_api.gen.resource_common.QosQueue.max_bandwidth.setter
- def max_bandwidth(self, max_bandwidth):
- """Set max-bandwidth for qos-queue.
-
- :param max_bandwidth: xsd:integer object
-
- """
- if 'max_bandwidth' not in self._pending_field_updates:
- self._pending_field_updates.add('max_bandwidth')
-
- self._max_bandwidth = max_bandwidth
- #end max_bandwidth
-
- def set_max_bandwidth(self, value):
- self.max_bandwidth = value
- #end set_max_bandwidth
-
- @vnc_api.gen.resource_common.QosQueue.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for qos-queue.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.QosQueue.display_name.setter
- def display_name(self, display_name):
- """Set display-name for qos-queue.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
-
- def get_qos_forwarding_class_back_refs(self):
-        """Return list of all qos-forwarding-classes using this qos-queue"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.qos_queue_read(id = self.uuid, fields = ['qos_forwarding_class_back_refs'])
- back_refs = getattr(obj, 'qos_forwarding_class_back_refs', None)
- self.qos_forwarding_class_back_refs = back_refs
-
- return back_refs
- #end get_qos_forwarding_class_back_refs
-
-#end class QosQueue
-
-class VirtualMachine(vnc_api.gen.resource_common.VirtualMachine):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name']
-
- self._server_conn = None
-
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(VirtualMachine, self).__init__(name, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = VirtualMachine(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
- if 'virtual_machine_interfaces' in kwargs:
- obj.virtual_machine_interfaces = kwargs['virtual_machine_interfaces']
-
- # add any specified references...
- if 'service_instance_refs' in kwargs:
- obj.service_instance_refs = kwargs['service_instance_refs']
-
- # and back references but no obj api for it...
- if 'virtual_machine_interface_back_refs' in kwargs:
- obj.virtual_machine_interface_back_refs = kwargs['virtual_machine_interface_back_refs']
- if 'virtual_router_back_refs' in kwargs:
- obj.virtual_router_back_refs = kwargs['virtual_router_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.VirtualMachine.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.VirtualMachine.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for virtual-machine.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.VirtualMachine.display_name.setter
- def display_name(self, display_name):
- """Set display-name for virtual-machine.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_service_instance(self, *args, **kwargs):
- """Set service-instance for virtual-machine.
-
- :param ref_obj: ServiceInstance object
-
- """
- self._pending_field_updates.add('service_instance_refs')
- self._pending_ref_updates.discard('service_instance_refs')
- super(VirtualMachine, self).set_service_instance(*args, **kwargs)
-
- #end set_service_instance
-
- def add_service_instance(self, *args, **kwargs):
- """Add service-instance to virtual-machine.
-
- :param ref_obj: ServiceInstance object
-
- """
- if 'service_instance_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('service_instance_refs')
- self._original_service_instance_refs = (self.get_service_instance_refs() or [])[:]
- super(VirtualMachine, self).add_service_instance(*args, **kwargs)
- #end add_service_instance
-
- def del_service_instance(self, *args, **kwargs):
- if 'service_instance_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('service_instance_refs')
- self._original_service_instance_refs = (self.get_service_instance_refs() or [])[:]
- super(VirtualMachine, self).del_service_instance(*args, **kwargs)
- #end del_service_instance
-
- def set_service_instance_list(self, *args, **kwargs):
- """Set service-instance list for virtual-machine.
-
- :param ref_obj_list: list of ServiceInstance objects
-
- """
- self._pending_field_updates.add('service_instance_refs')
- self._pending_ref_updates.discard('service_instance_refs')
- super(VirtualMachine, self).set_service_instance_list(*args, **kwargs)
- #end set_service_instance_list
-
- def get_virtual_machine_interfaces(self):
- # if the object was not created/read via the library, we cannot service this call
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(VirtualMachine, self).get_virtual_machine_interfaces()
- if not children: # read it for the first time
- obj = svr_conn.virtual_machine_read(id = self.uuid, fields = ['virtual_machine_interfaces'])
- children = getattr(obj, 'virtual_machine_interfaces', None)
- self.virtual_machine_interfaces = children
-
- return children
- #end get_virtual_machine_interfaces
-
-
- def get_virtual_machine_interface_back_refs(self):
- """Return list of all virtual-machine-interfaces using this virtual-machine"""
- # if the object was not created/read via the library, we cannot service this call
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_machine_read(id = self.uuid, fields = ['virtual_machine_interface_back_refs'])
- back_refs = getattr(obj, 'virtual_machine_interface_back_refs', None)
- self.virtual_machine_interface_back_refs = back_refs
-
- return back_refs
- #end get_virtual_machine_interface_back_refs
-
- def get_virtual_router_back_refs(self):
- """Return list of all virtual-routers using this virtual-machine"""
- # if the object was not created/read via the library, we cannot service this call
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_machine_read(id = self.uuid, fields = ['virtual_router_back_refs'])
- back_refs = getattr(obj, 'virtual_router_back_refs', None)
- self.virtual_router_back_refs = back_refs
-
- return back_refs
- #end get_virtual_router_back_refs
-
-#end class VirtualMachine
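
The generated classes removed here all follow the same pattern: constructor arguments and property assignments are recorded in _pending_field_updates so a later write can send only the fields that actually changed. A minimal sketch of driving the from_dict() factory shown above, assuming this module is importable as vnc_api.gen.resource_client (a guess based on the vnc_api.gen.* references in the code) and using a purely illustrative input dict:

    from vnc_api.gen.resource_client import VirtualMachine  # assumed import path

    # Illustrative payload, e.g. parsed from a REST response body
    vm_dict = {
        'fq_name': ['vm-1'],
        'uuid': '0f3b1c2a-0000-4000-8000-000000000001',  # placeholder value
        'display_name': 'vm-1',
    }
    vm = VirtualMachine.from_dict(**vm_dict)
    # from_dict() assigns uuid through the property setter, so it is tracked as pending too
    assert 'uuid' in vm.get_pending_updates()
    assert 'display_name' in vm.get_pending_updates()
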
-
-class InterfaceRouteTable(vnc_api.gen.resource_common.InterfaceRouteTable):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, interface_route_table_routes = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if interface_route_table_routes:
- pending_fields.append('interface_route_table_routes')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(InterfaceRouteTable, self).__init__(name, parent_obj, interface_route_table_routes, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'interface_route_table_routes' in kwargs:
- props_dict['interface_route_table_routes'] = vnc_api.gen.resource_xsd.RouteTableType(**kwargs['interface_route_table_routes'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = InterfaceRouteTable(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
-
- # and any back-references (there is no object API for these)...
- if 'virtual_machine_interface_back_refs' in kwargs:
- obj.virtual_machine_interface_back_refs = kwargs['virtual_machine_interface_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.InterfaceRouteTable.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.InterfaceRouteTable.interface_route_table_routes.setter
- def interface_route_table_routes(self, interface_route_table_routes):
- """Set interface-route-table-routes for interface-route-table.
-
- :param interface_route_table_routes: RouteTableType object
-
- """
- if 'interface_route_table_routes' not in self._pending_field_updates:
- self._pending_field_updates.add('interface_route_table_routes')
-
- self._interface_route_table_routes = interface_route_table_routes
- #end interface_route_table_routes
-
- def set_interface_route_table_routes(self, value):
- self.interface_route_table_routes = value
- #end set_interface_route_table_routes
-
- @vnc_api.gen.resource_common.InterfaceRouteTable.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for interface-route-table.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.InterfaceRouteTable.display_name.setter
- def display_name(self, display_name):
- """Set display-name for interface-route-table.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
-
- def get_virtual_machine_interface_back_refs(self):
- """Return list of all virtual-machine-interfaces using this interface-route-table"""
- # if the object was not created/read via the library, we cannot service this call
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.interface_route_table_read(id = self.uuid, fields = ['virtual_machine_interface_back_refs'])
- back_refs = getattr(obj, 'virtual_machine_interface_back_refs', None)
- self.virtual_machine_interface_back_refs = back_refs
-
- return back_refs
- #end get_virtual_machine_interface_back_refs
-
-#end class InterfaceRouteTable
-
-class ServiceTemplate(vnc_api.gen.resource_common.ServiceTemplate):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, service_template_properties = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if service_template_properties:
- pending_fields.append('service_template_properties')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(ServiceTemplate, self).__init__(name, parent_obj, service_template_properties, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'service_template_properties' in kwargs:
- props_dict['service_template_properties'] = vnc_api.gen.resource_xsd.ServiceTemplateType(**kwargs['service_template_properties'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = ServiceTemplate(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
-
- # and any back-references (there is no object API for these)...
- if 'service_instance_back_refs' in kwargs:
- obj.service_instance_back_refs = kwargs['service_instance_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.ServiceTemplate.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.ServiceTemplate.service_template_properties.setter
- def service_template_properties(self, service_template_properties):
- """Set service-template-properties for service-template.
-
- :param service_template_properties: ServiceTemplateType object
-
- """
- if 'service_template_properties' not in self._pending_field_updates:
- self._pending_field_updates.add('service_template_properties')
-
- self._service_template_properties = service_template_properties
- #end service_template_properties
-
- def set_service_template_properties(self, value):
- self.service_template_properties = value
- #end set_service_template_properties
-
- @vnc_api.gen.resource_common.ServiceTemplate.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for service-template.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.ServiceTemplate.display_name.setter
- def display_name(self, display_name):
- """Set display-name for service-template.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
-
- def get_service_instance_back_refs(self):
- """Return list of all service-instances using this service-template"""
- # if the object was not created/read via the library, we cannot service this call
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.service_template_read(id = self.uuid, fields = ['service_instance_back_refs'])
- back_refs = getattr(obj, 'service_instance_back_refs', None)
- self.service_instance_back_refs = back_refs
-
- return back_refs
- #end get_service_instance_back_refs
-
-#end class ServiceTemplate
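
Plain property setters only mark a field dirty; nothing is sent to the API server until the caller performs an update, and clear_pending_updates() resets the bookkeeping once a write has been acknowledged. A short sketch of that behaviour, under the same assumed vnc_api.gen.resource_client import path:

    from vnc_api.gen.resource_client import ServiceTemplate  # assumed import path

    st = ServiceTemplate(name='nat-template')
    st.set_display_name('NAT template')        # routes through the display_name setter
    print(sorted(st.get_pending_updates()))    # ['display_name', 'fq_name', 'parent_type']
    st.clear_pending_updates()                 # typically done after a successful write
    print(st.get_pending_updates())            # set()
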
-
-class VirtualIp(vnc_api.gen.resource_common.VirtualIp):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, virtual_ip_properties = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if virtual_ip_properties:
- pending_fields.append('virtual_ip_properties')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(VirtualIp, self).__init__(name, parent_obj, virtual_ip_properties, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'virtual_ip_properties' in kwargs:
- props_dict['virtual_ip_properties'] = vnc_api.gen.resource_xsd.VirtualIpType(**kwargs['virtual_ip_properties'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = VirtualIp(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
- if 'loadbalancer_pool_refs' in kwargs:
- obj.loadbalancer_pool_refs = kwargs['loadbalancer_pool_refs']
- if 'virtual_machine_interface_refs' in kwargs:
- obj.virtual_machine_interface_refs = kwargs['virtual_machine_interface_refs']
-
- # and any back-references (there is no object API for these)...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.VirtualIp.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.VirtualIp.virtual_ip_properties.setter
- def virtual_ip_properties(self, virtual_ip_properties):
- """Set virtual-ip-properties for virtual-ip.
-
- :param virtual_ip_properties: VirtualIpType object
-
- """
- if 'virtual_ip_properties' not in self._pending_field_updates:
- self._pending_field_updates.add('virtual_ip_properties')
-
- self._virtual_ip_properties = virtual_ip_properties
- #end virtual_ip_properties
-
- def set_virtual_ip_properties(self, value):
- self.virtual_ip_properties = value
- #end set_virtual_ip_properties
-
- @vnc_api.gen.resource_common.VirtualIp.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for virtual-ip.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.VirtualIp.display_name.setter
- def display_name(self, display_name):
- """Set display-name for virtual-ip.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_loadbalancer_pool(self, *args, **kwargs):
- """Set loadbalancer-pool for virtual-ip.
-
- :param ref_obj: LoadbalancerPool object
-
- """
- self._pending_field_updates.add('loadbalancer_pool_refs')
- self._pending_ref_updates.discard('loadbalancer_pool_refs')
- super(VirtualIp, self).set_loadbalancer_pool(*args, **kwargs)
-
- #end set_loadbalancer_pool
-
- def add_loadbalancer_pool(self, *args, **kwargs):
- """Add loadbalancer-pool to virtual-ip.
-
- :param ref_obj: LoadbalancerPool object
-
- """
- if 'loadbalancer_pool_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('loadbalancer_pool_refs')
- self._original_loadbalancer_pool_refs = (self.get_loadbalancer_pool_refs() or [])[:]
- super(VirtualIp, self).add_loadbalancer_pool(*args, **kwargs)
- #end add_loadbalancer_pool
-
- def del_loadbalancer_pool(self, *args, **kwargs):
- if 'loadbalancer_pool_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('loadbalancer_pool_refs')
- self._original_loadbalancer_pool_refs = (self.get_loadbalancer_pool_refs() or [])[:]
- super(VirtualIp, self).del_loadbalancer_pool(*args, **kwargs)
- #end del_loadbalancer_pool
-
- def set_loadbalancer_pool_list(self, *args, **kwargs):
- """Set loadbalancer-pool list for virtual-ip.
-
- :param ref_obj_list: list of LoadbalancerPool objects
-
- """
- self._pending_field_updates.add('loadbalancer_pool_refs')
- self._pending_ref_updates.discard('loadbalancer_pool_refs')
- super(VirtualIp, self).set_loadbalancer_pool_list(*args, **kwargs)
- #end set_loadbalancer_pool_list
-
- def set_virtual_machine_interface(self, *args, **kwargs):
- """Set virtual-machine-interface for virtual-ip.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(VirtualIp, self).set_virtual_machine_interface(*args, **kwargs)
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, *args, **kwargs):
- """Add virtual-machine-interface to virtual-ip.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(VirtualIp, self).add_virtual_machine_interface(*args, **kwargs)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, *args, **kwargs):
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(VirtualIp, self).del_virtual_machine_interface(*args, **kwargs)
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, *args, **kwargs):
- """Set virtual-machine-interface list for virtual-ip.
-
- :param ref_obj_list: list of VirtualMachineInterface objects
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(VirtualIp, self).set_virtual_machine_interface_list(*args, **kwargs)
- #end set_virtual_machine_interface_list
-
-
-#end class VirtualIp
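
For references, the generated code distinguishes whole-list replacement (set_* / set_*_list, tracked in _pending_field_updates) from incremental edits (add_* / del_*, tracked in _pending_ref_updates together with a snapshot of the original list so a delta can be computed later). A hedged sketch, reusing the assumed import path; LoadbalancerPool and VirtualMachineInterface are generated elsewhere in this same file:

    from vnc_api.gen.resource_client import (   # assumed import path
        VirtualIp, LoadbalancerPool, VirtualMachineInterface)

    vip = VirtualIp(name='vip-1')

    vip.set_loadbalancer_pool(LoadbalancerPool(name='pool-1'))   # replace-style
    assert 'loadbalancer_pool_refs' in vip.get_pending_updates()
    assert 'loadbalancer_pool_refs' not in vip.get_ref_updates()

    vip.add_virtual_machine_interface(VirtualMachineInterface(name='port-1'))  # incremental
    assert 'virtual_machine_interface_refs' in vip.get_ref_updates()
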
-
-class LoadbalancerMember(vnc_api.gen.resource_common.LoadbalancerMember):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, loadbalancer_member_properties = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if loadbalancer_member_properties:
- pending_fields.append('loadbalancer_member_properties')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(LoadbalancerMember, self).__init__(name, parent_obj, loadbalancer_member_properties, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'loadbalancer_member_properties' in kwargs:
- props_dict['loadbalancer_member_properties'] = vnc_api.gen.resource_xsd.LoadbalancerMemberType(**kwargs['loadbalancer_member_properties'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = LoadbalancerMember(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
-
- # and any back-references (there is no object API for these)...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.LoadbalancerMember.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.LoadbalancerMember.loadbalancer_member_properties.setter
- def loadbalancer_member_properties(self, loadbalancer_member_properties):
- """Set loadbalancer-member-properties for loadbalancer-member.
-
- :param loadbalancer_member_properties: LoadbalancerMemberType object
-
- """
- if 'loadbalancer_member_properties' not in self._pending_field_updates:
- self._pending_field_updates.add('loadbalancer_member_properties')
-
- self._loadbalancer_member_properties = loadbalancer_member_properties
- #end loadbalancer_member_properties
-
- def set_loadbalancer_member_properties(self, value):
- self.loadbalancer_member_properties = value
- #end set_loadbalancer_member_properties
-
- @vnc_api.gen.resource_common.LoadbalancerMember.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for loadbalancer-member.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.LoadbalancerMember.display_name.setter
- def display_name(self, display_name):
- """Set display-name for loadbalancer-member.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
-
-#end class LoadbalancerMember
-
-class SecurityGroup(vnc_api.gen.resource_common.SecurityGroup):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, security_group_id = None, configured_security_group_id = None, security_group_entries = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if security_group_id:
- pending_fields.append('security_group_id')
- if configured_security_group_id:
- pending_fields.append('configured_security_group_id')
- if security_group_entries:
- pending_fields.append('security_group_entries')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(SecurityGroup, self).__init__(name, parent_obj, security_group_id, configured_security_group_id, security_group_entries, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'security_group_id' in kwargs:
- props_dict['security_group_id'] = kwargs['security_group_id']
- if 'configured_security_group_id' in kwargs:
- props_dict['configured_security_group_id'] = kwargs['configured_security_group_id']
- if 'security_group_entries' in kwargs:
- props_dict['security_group_entries'] = vnc_api.gen.resource_xsd.PolicyEntriesType(**kwargs['security_group_entries'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = SecurityGroup(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
- if 'access_control_lists' in kwargs:
- obj.access_control_lists = kwargs['access_control_lists']
-
- # add any specified references...
-
- # and any back-references (there is no object API for these)...
- if 'virtual_machine_interface_back_refs' in kwargs:
- obj.virtual_machine_interface_back_refs = kwargs['virtual_machine_interface_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.SecurityGroup.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.SecurityGroup.security_group_id.setter
- def security_group_id(self, security_group_id):
- """Set security-group-id for security-group.
-
- :param security_group_id: xsd:string object
-
- """
- if 'security_group_id' not in self._pending_field_updates:
- self._pending_field_updates.add('security_group_id')
-
- self._security_group_id = security_group_id
- #end security_group_id
-
- def set_security_group_id(self, value):
- self.security_group_id = value
- #end set_security_group_id
-
- @vnc_api.gen.resource_common.SecurityGroup.configured_security_group_id.setter
- def configured_security_group_id(self, configured_security_group_id):
- """Set configured-security-group-id for security-group.
-
- :param configured_security_group_id: xsd:integer object
-
- """
- if 'configured_security_group_id' not in self._pending_field_updates:
- self._pending_field_updates.add('configured_security_group_id')
-
- self._configured_security_group_id = configured_security_group_id
- #end configured_security_group_id
-
- def set_configured_security_group_id(self, value):
- self.configured_security_group_id = value
- #end set_configured_security_group_id
-
- @vnc_api.gen.resource_common.SecurityGroup.security_group_entries.setter
- def security_group_entries(self, security_group_entries):
- """Set security-group-entries for security-group.
-
- :param security_group_entries: PolicyEntriesType object
-
- """
- if 'security_group_entries' not in self._pending_field_updates:
- self._pending_field_updates.add('security_group_entries')
-
- self._security_group_entries = security_group_entries
- #end security_group_entries
-
- def set_security_group_entries(self, value):
- self.security_group_entries = value
- #end set_security_group_entries
-
- @vnc_api.gen.resource_common.SecurityGroup.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for security-group.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.SecurityGroup.display_name.setter
- def display_name(self, display_name):
- """Set display-name for security-group.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_access_control_lists(self):
- # if the object was not created/read via the library, we cannot service this call
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(SecurityGroup, self).get_access_control_lists()
- if not children: # read it for the first time
- obj = svr_conn.security_group_read(id = self.uuid, fields = ['access_control_lists'])
- children = getattr(obj, 'access_control_lists', None)
- self.access_control_lists = children
-
- return children
- #end get_access_control_lists
-
-
- def get_virtual_machine_interface_back_refs(self):
- """Return list of all virtual-machine-interfaces using this security-group"""
- # if the object was not created/read via the library, we cannot service this call
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.security_group_read(id = self.uuid, fields = ['virtual_machine_interface_back_refs'])
- back_refs = getattr(obj, 'virtual_machine_interface_back_refs', None)
- self.virtual_machine_interface_back_refs = back_refs
-
- return back_refs
- #end get_virtual_machine_interface_back_refs
-
-#end class SecurityGroup
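
The get_* helpers for children and back-references are lazy: they return None unless the object carries a server connection (attached when the object is created or read through the client library, or explicitly via set_server_conn()), and otherwise issue a narrow read for just that one field. A sketch under the assumption that the client handle is vnc_api.vnc_api.VncApi and that an API server is reachable at the hypothetical address below:

    from vnc_api.vnc_api import VncApi  # assumed client class and import path

    api = VncApi(api_server_host='192.0.2.10')   # hypothetical endpoint
    sg = api.security_group_read(
        fq_name=['default-domain', 'default-project', 'default'])
    # objects obtained through the client carry the connection already; a locally
    # constructed SecurityGroup would need sg.set_server_conn(api) first
    vmi_back_refs = sg.get_virtual_machine_interface_back_refs()
    # None when no connection is set, otherwise a list of back-reference dicts
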
-
-class ProviderAttachment(vnc_api.gen.resource_common.ProviderAttachment):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name']
-
- self._server_conn = None
-
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(ProviderAttachment, self).__init__(name, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = ProviderAttachment(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
- if 'virtual_router_refs' in kwargs:
- obj.virtual_router_refs = kwargs['virtual_router_refs']
-
- # and any back-references (there is no object API for these)...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.ProviderAttachment.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.ProviderAttachment.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for provider-attachment.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.ProviderAttachment.display_name.setter
- def display_name(self, display_name):
- """Set display-name for provider-attachment.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_virtual_router(self, *args, **kwargs):
- """Set virtual-router for provider-attachment.
-
- :param ref_obj: VirtualRouter object
-
- """
- self._pending_field_updates.add('virtual_router_refs')
- self._pending_ref_updates.discard('virtual_router_refs')
- super(ProviderAttachment, self).set_virtual_router(*args, **kwargs)
-
- #end set_virtual_router
-
- def add_virtual_router(self, *args, **kwargs):
- """Add virtual-router to provider-attachment.
-
- :param ref_obj: VirtualRouter object
-
- """
- if 'virtual_router_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_router_refs')
- self._original_virtual_router_refs = (self.get_virtual_router_refs() or [])[:]
- super(ProviderAttachment, self).add_virtual_router(*args, **kwargs)
- #end add_virtual_router
-
- def del_virtual_router(self, *args, **kwargs):
- if 'virtual_router_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_router_refs')
- self._original_virtual_router_refs = (self.get_virtual_router_refs() or [])[:]
- super(ProviderAttachment, self).del_virtual_router(*args, **kwargs)
- #end del_virtual_router
-
- def set_virtual_router_list(self, *args, **kwargs):
- """Set virtual-router list for provider-attachment.
-
- :param ref_obj_list: list of VirtualRouter objects
-
- """
- self._pending_field_updates.add('virtual_router_refs')
- self._pending_ref_updates.discard('virtual_router_refs')
- super(ProviderAttachment, self).set_virtual_router_list(*args, **kwargs)
- #end set_virtual_router_list
-
-
-#end class ProviderAttachment
-
-class VirtualMachineInterface(vnc_api.gen.resource_common.VirtualMachineInterface):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, virtual_machine_interface_mac_addresses = None, virtual_machine_interface_dhcp_option_list = None, virtual_machine_interface_host_routes = None, virtual_machine_interface_allowed_address_pairs = None, vrf_assign_table = None, virtual_machine_interface_device_owner = None, virtual_machine_interface_properties = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if virtual_machine_interface_mac_addresses:
- pending_fields.append('virtual_machine_interface_mac_addresses')
- if virtual_machine_interface_dhcp_option_list:
- pending_fields.append('virtual_machine_interface_dhcp_option_list')
- if virtual_machine_interface_host_routes:
- pending_fields.append('virtual_machine_interface_host_routes')
- if virtual_machine_interface_allowed_address_pairs:
- pending_fields.append('virtual_machine_interface_allowed_address_pairs')
- if vrf_assign_table:
- pending_fields.append('vrf_assign_table')
- if virtual_machine_interface_device_owner:
- pending_fields.append('virtual_machine_interface_device_owner')
- if virtual_machine_interface_properties:
- pending_fields.append('virtual_machine_interface_properties')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(VirtualMachineInterface, self).__init__(name, parent_obj, virtual_machine_interface_mac_addresses, virtual_machine_interface_dhcp_option_list, virtual_machine_interface_host_routes, virtual_machine_interface_allowed_address_pairs, vrf_assign_table, virtual_machine_interface_device_owner, virtual_machine_interface_properties, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'virtual_machine_interface_mac_addresses' in kwargs:
- props_dict['virtual_machine_interface_mac_addresses'] = vnc_api.gen.resource_xsd.MacAddressesType(**kwargs['virtual_machine_interface_mac_addresses'])
- if 'virtual_machine_interface_dhcp_option_list' in kwargs:
- props_dict['virtual_machine_interface_dhcp_option_list'] = vnc_api.gen.resource_xsd.DhcpOptionsListType(**kwargs['virtual_machine_interface_dhcp_option_list'])
- if 'virtual_machine_interface_host_routes' in kwargs:
- props_dict['virtual_machine_interface_host_routes'] = vnc_api.gen.resource_xsd.RouteTableType(**kwargs['virtual_machine_interface_host_routes'])
- if 'virtual_machine_interface_allowed_address_pairs' in kwargs:
- props_dict['virtual_machine_interface_allowed_address_pairs'] = vnc_api.gen.resource_xsd.AllowedAddressPairs(**kwargs['virtual_machine_interface_allowed_address_pairs'])
- if 'vrf_assign_table' in kwargs:
- props_dict['vrf_assign_table'] = vnc_api.gen.resource_xsd.VrfAssignTableType(**kwargs['vrf_assign_table'])
- if 'virtual_machine_interface_device_owner' in kwargs:
- props_dict['virtual_machine_interface_device_owner'] = kwargs['virtual_machine_interface_device_owner']
- if 'virtual_machine_interface_properties' in kwargs:
- props_dict['virtual_machine_interface_properties'] = vnc_api.gen.resource_xsd.VirtualMachineInterfacePropertiesType(**kwargs['virtual_machine_interface_properties'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = VirtualMachineInterface(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
- if 'qos_forwarding_class_refs' in kwargs:
- obj.qos_forwarding_class_refs = kwargs['qos_forwarding_class_refs']
- if 'security_group_refs' in kwargs:
- obj.security_group_refs = kwargs['security_group_refs']
- if 'virtual_machine_interface_refs' in kwargs:
- obj.virtual_machine_interface_refs = kwargs['virtual_machine_interface_refs']
- if 'virtual_machine_refs' in kwargs:
- obj.virtual_machine_refs = kwargs['virtual_machine_refs']
- if 'virtual_network_refs' in kwargs:
- obj.virtual_network_refs = kwargs['virtual_network_refs']
- if 'routing_instance_refs' in kwargs:
- obj.routing_instance_refs = kwargs['routing_instance_refs']
- for ref in obj.routing_instance_refs:
- ref['attr'] = vnc_api.gen.resource_xsd.PolicyBasedForwardingRuleType(**ref['attr'])
- if 'interface_route_table_refs' in kwargs:
- obj.interface_route_table_refs = kwargs['interface_route_table_refs']
-
- # and any back-references (there is no object API for these)...
- if 'virtual_machine_interface_back_refs' in kwargs:
- obj.virtual_machine_interface_back_refs = kwargs['virtual_machine_interface_back_refs']
- if 'instance_ip_back_refs' in kwargs:
- obj.instance_ip_back_refs = kwargs['instance_ip_back_refs']
- if 'subnet_back_refs' in kwargs:
- obj.subnet_back_refs = kwargs['subnet_back_refs']
- if 'floating_ip_back_refs' in kwargs:
- obj.floating_ip_back_refs = kwargs['floating_ip_back_refs']
- if 'logical_interface_back_refs' in kwargs:
- obj.logical_interface_back_refs = kwargs['logical_interface_back_refs']
- if 'customer_attachment_back_refs' in kwargs:
- obj.customer_attachment_back_refs = kwargs['customer_attachment_back_refs']
- if 'logical_router_back_refs' in kwargs:
- obj.logical_router_back_refs = kwargs['logical_router_back_refs']
- if 'loadbalancer_pool_back_refs' in kwargs:
- obj.loadbalancer_pool_back_refs = kwargs['loadbalancer_pool_back_refs']
- if 'virtual_ip_back_refs' in kwargs:
- obj.virtual_ip_back_refs = kwargs['virtual_ip_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.VirtualMachineInterface.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.VirtualMachineInterface.virtual_machine_interface_mac_addresses.setter
- def virtual_machine_interface_mac_addresses(self, virtual_machine_interface_mac_addresses):
- """Set virtual-machine-interface-mac-addresses for virtual-machine-interface.
-
- :param virtual_machine_interface_mac_addresses: MacAddressesType object
-
- """
- if 'virtual_machine_interface_mac_addresses' not in self._pending_field_updates:
- self._pending_field_updates.add('virtual_machine_interface_mac_addresses')
-
- self._virtual_machine_interface_mac_addresses = virtual_machine_interface_mac_addresses
- #end virtual_machine_interface_mac_addresses
-
- def set_virtual_machine_interface_mac_addresses(self, value):
- self.virtual_machine_interface_mac_addresses = value
- #end set_virtual_machine_interface_mac_addresses
-
- @vnc_api.gen.resource_common.VirtualMachineInterface.virtual_machine_interface_dhcp_option_list.setter
- def virtual_machine_interface_dhcp_option_list(self, virtual_machine_interface_dhcp_option_list):
- """Set virtual-machine-interface-dhcp-option-list for virtual-machine-interface.
-
- :param virtual_machine_interface_dhcp_option_list: DhcpOptionsListType object
-
- """
- if 'virtual_machine_interface_dhcp_option_list' not in self._pending_field_updates:
- self._pending_field_updates.add('virtual_machine_interface_dhcp_option_list')
-
- self._virtual_machine_interface_dhcp_option_list = virtual_machine_interface_dhcp_option_list
- #end virtual_machine_interface_dhcp_option_list
-
- def set_virtual_machine_interface_dhcp_option_list(self, value):
- self.virtual_machine_interface_dhcp_option_list = value
- #end set_virtual_machine_interface_dhcp_option_list
-
- @vnc_api.gen.resource_common.VirtualMachineInterface.virtual_machine_interface_host_routes.setter
- def virtual_machine_interface_host_routes(self, virtual_machine_interface_host_routes):
- """Set virtual-machine-interface-host-routes for virtual-machine-interface.
-
- :param virtual_machine_interface_host_routes: RouteTableType object
-
- """
- if 'virtual_machine_interface_host_routes' not in self._pending_field_updates:
- self._pending_field_updates.add('virtual_machine_interface_host_routes')
-
- self._virtual_machine_interface_host_routes = virtual_machine_interface_host_routes
- #end virtual_machine_interface_host_routes
-
- def set_virtual_machine_interface_host_routes(self, value):
- self.virtual_machine_interface_host_routes = value
- #end set_virtual_machine_interface_host_routes
-
- @vnc_api.gen.resource_common.VirtualMachineInterface.virtual_machine_interface_allowed_address_pairs.setter
- def virtual_machine_interface_allowed_address_pairs(self, virtual_machine_interface_allowed_address_pairs):
- """Set virtual-machine-interface-allowed-address-pairs for virtual-machine-interface.
-
- :param virtual_machine_interface_allowed_address_pairs: AllowedAddressPairs object
-
- """
- if 'virtual_machine_interface_allowed_address_pairs' not in self._pending_field_updates:
- self._pending_field_updates.add('virtual_machine_interface_allowed_address_pairs')
-
- self._virtual_machine_interface_allowed_address_pairs = virtual_machine_interface_allowed_address_pairs
- #end virtual_machine_interface_allowed_address_pairs
-
- def set_virtual_machine_interface_allowed_address_pairs(self, value):
- self.virtual_machine_interface_allowed_address_pairs = value
- #end set_virtual_machine_interface_allowed_address_pairs
-
- @vnc_api.gen.resource_common.VirtualMachineInterface.vrf_assign_table.setter
- def vrf_assign_table(self, vrf_assign_table):
- """Set vrf-assign-table for virtual-machine-interface.
-
- :param vrf_assign_table: VrfAssignTableType object
-
- """
- if 'vrf_assign_table' not in self._pending_field_updates:
- self._pending_field_updates.add('vrf_assign_table')
-
- self._vrf_assign_table = vrf_assign_table
- #end vrf_assign_table
-
- def set_vrf_assign_table(self, value):
- self.vrf_assign_table = value
- #end set_vrf_assign_table
-
- @vnc_api.gen.resource_common.VirtualMachineInterface.virtual_machine_interface_device_owner.setter
- def virtual_machine_interface_device_owner(self, virtual_machine_interface_device_owner):
- """Set virtual-machine-interface-device-owner for virtual-machine-interface.
-
- :param virtual_machine_interface_device_owner: xsd:string object
-
- """
- if 'virtual_machine_interface_device_owner' not in self._pending_field_updates:
- self._pending_field_updates.add('virtual_machine_interface_device_owner')
-
- self._virtual_machine_interface_device_owner = virtual_machine_interface_device_owner
- #end virtual_machine_interface_device_owner
-
- def set_virtual_machine_interface_device_owner(self, value):
- self.virtual_machine_interface_device_owner = value
- #end set_virtual_machine_interface_device_owner
-
- @vnc_api.gen.resource_common.VirtualMachineInterface.virtual_machine_interface_properties.setter
- def virtual_machine_interface_properties(self, virtual_machine_interface_properties):
- """Set virtual-machine-interface-properties for virtual-machine-interface.
-
- :param virtual_machine_interface_properties: VirtualMachineInterfacePropertiesType object
-
- """
- if 'virtual_machine_interface_properties' not in self._pending_field_updates:
- self._pending_field_updates.add('virtual_machine_interface_properties')
-
- self._virtual_machine_interface_properties = virtual_machine_interface_properties
- #end virtual_machine_interface_properties
-
- def set_virtual_machine_interface_properties(self, value):
- self.virtual_machine_interface_properties = value
- #end set_virtual_machine_interface_properties
-
- @vnc_api.gen.resource_common.VirtualMachineInterface.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for virtual-machine-interface.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.VirtualMachineInterface.display_name.setter
- def display_name(self, display_name):
- """Set display-name for virtual-machine-interface.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_qos_forwarding_class(self, *args, **kwargs):
- """Set qos-forwarding-class for virtual-machine-interface.
-
- :param ref_obj: QosForwardingClass object
-
- """
- self._pending_field_updates.add('qos_forwarding_class_refs')
- self._pending_ref_updates.discard('qos_forwarding_class_refs')
- super(VirtualMachineInterface, self).set_qos_forwarding_class(*args, **kwargs)
-
- #end set_qos_forwarding_class
-
- def add_qos_forwarding_class(self, *args, **kwargs):
- """Add qos-forwarding-class to virtual-machine-interface.
-
- :param ref_obj: QosForwardingClass object
-
- """
- if 'qos_forwarding_class_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('qos_forwarding_class_refs')
- self._original_qos_forwarding_class_refs = (self.get_qos_forwarding_class_refs() or [])[:]
- super(VirtualMachineInterface, self).add_qos_forwarding_class(*args, **kwargs)
- #end add_qos_forwarding_class
-
- def del_qos_forwarding_class(self, *args, **kwargs):
- if 'qos_forwarding_class_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('qos_forwarding_class_refs')
- self._original_qos_forwarding_class_refs = (self.get_qos_forwarding_class_refs() or [])[:]
- super(VirtualMachineInterface, self).del_qos_forwarding_class(*args, **kwargs)
- #end del_qos_forwarding_class
-
- def set_qos_forwarding_class_list(self, *args, **kwargs):
- """Set qos-forwarding-class list for virtual-machine-interface.
-
- :param ref_obj_list: list of QosForwardingClass objects
-
- """
- self._pending_field_updates.add('qos_forwarding_class_refs')
- self._pending_ref_updates.discard('qos_forwarding_class_refs')
- super(VirtualMachineInterface, self).set_qos_forwarding_class_list(*args, **kwargs)
- #end set_qos_forwarding_class_list
-
- def set_security_group(self, *args, **kwargs):
- """Set security-group for virtual-machine-interface.
-
- :param ref_obj: SecurityGroup object
-
- """
- self._pending_field_updates.add('security_group_refs')
- self._pending_ref_updates.discard('security_group_refs')
- super(VirtualMachineInterface, self).set_security_group(*args, **kwargs)
-
- #end set_security_group
-
- def add_security_group(self, *args, **kwargs):
- """Add security-group to virtual-machine-interface.
-
- :param ref_obj: SecurityGroup object
-
- """
- if 'security_group_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('security_group_refs')
- self._original_security_group_refs = (self.get_security_group_refs() or [])[:]
- super(VirtualMachineInterface, self).add_security_group(*args, **kwargs)
- #end add_security_group
-
- def del_security_group(self, *args, **kwargs):
- if 'security_group_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('security_group_refs')
- self._original_security_group_refs = (self.get_security_group_refs() or [])[:]
- super(VirtualMachineInterface, self).del_security_group(*args, **kwargs)
- #end del_security_group
-
- def set_security_group_list(self, *args, **kwargs):
- """Set security-group list for virtual-machine-interface.
-
- :param ref_obj_list: list of SecurityGroup objects
-
- """
- self._pending_field_updates.add('security_group_refs')
- self._pending_ref_updates.discard('security_group_refs')
- super(VirtualMachineInterface, self).set_security_group_list(*args, **kwargs)
- #end set_security_group_list
-
- def set_virtual_machine_interface(self, *args, **kwargs):
- """Set virtual-machine-interface for virtual-machine-interface.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(VirtualMachineInterface, self).set_virtual_machine_interface(*args, **kwargs)
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, *args, **kwargs):
- """Add virtual-machine-interface to virtual-machine-interface.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(VirtualMachineInterface, self).add_virtual_machine_interface(*args, **kwargs)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, *args, **kwargs):
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(VirtualMachineInterface, self).del_virtual_machine_interface(*args, **kwargs)
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, *args, **kwargs):
- """Set virtual-machine-interface list for virtual-machine-interface.
-
- :param ref_obj_list: list of VirtualMachineInterface objects
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(VirtualMachineInterface, self).set_virtual_machine_interface_list(*args, **kwargs)
- #end set_virtual_machine_interface_list
-
- def set_virtual_machine(self, *args, **kwargs):
- """Set virtual-machine for virtual-machine-interface.
-
- :param ref_obj: VirtualMachine object
-
- """
- self._pending_field_updates.add('virtual_machine_refs')
- self._pending_ref_updates.discard('virtual_machine_refs')
- super(VirtualMachineInterface, self).set_virtual_machine(*args, **kwargs)
-
- #end set_virtual_machine
-
- def add_virtual_machine(self, *args, **kwargs):
- """Add virtual-machine to virtual-machine-interface.
-
- :param ref_obj: VirtualMachine object
-
- """
- if 'virtual_machine_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_machine_refs')
- self._original_virtual_machine_refs = (self.get_virtual_machine_refs() or [])[:]
- super(VirtualMachineInterface, self).add_virtual_machine(*args, **kwargs)
- #end add_virtual_machine
-
- def del_virtual_machine(self, *args, **kwargs):
- if 'virtual_machine_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_machine_refs')
- self._original_virtual_machine_refs = (self.get_virtual_machine_refs() or [])[:]
- super(VirtualMachineInterface, self).del_virtual_machine(*args, **kwargs)
- #end del_virtual_machine
-
- def set_virtual_machine_list(self, *args, **kwargs):
- """Set virtual-machine list for virtual-machine-interface.
-
- :param ref_obj_list: list of VirtualMachine objects
-
- """
- self._pending_field_updates.add('virtual_machine_refs')
- self._pending_ref_updates.discard('virtual_machine_refs')
- super(VirtualMachineInterface, self).set_virtual_machine_list(*args, **kwargs)
- #end set_virtual_machine_list
-
- def set_virtual_network(self, *args, **kwargs):
- """Set virtual-network for virtual-machine-interface.
-
- :param ref_obj: VirtualNetwork object
-
- """
- self._pending_field_updates.add('virtual_network_refs')
- self._pending_ref_updates.discard('virtual_network_refs')
- super(VirtualMachineInterface, self).set_virtual_network(*args, **kwargs)
-
- #end set_virtual_network
-
- def add_virtual_network(self, *args, **kwargs):
- """Add virtual-network to virtual-machine-interface.
-
- :param ref_obj: VirtualNetwork object
-
- """
- if 'virtual_network_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_network_refs')
- self._original_virtual_network_refs = (self.get_virtual_network_refs() or [])[:]
- super(VirtualMachineInterface, self).add_virtual_network(*args, **kwargs)
- #end add_virtual_network
-
- def del_virtual_network(self, *args, **kwargs):
- if 'virtual_network_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_network_refs')
- self._original_virtual_network_refs = (self.get_virtual_network_refs() or [])[:]
- super(VirtualMachineInterface, self).del_virtual_network(*args, **kwargs)
- #end del_virtual_network
-
- def set_virtual_network_list(self, *args, **kwargs):
- """Set virtual-network list for virtual-machine-interface.
-
- :param ref_obj_list: list of VirtualNetwork objects
-
- """
- self._pending_field_updates.add('virtual_network_refs')
- self._pending_ref_updates.discard('virtual_network_refs')
- super(VirtualMachineInterface, self).set_virtual_network_list(*args, **kwargs)
- #end set_virtual_network_list
-
- def set_routing_instance(self, *args, **kwargs):
- """Set routing-instance for virtual-machine-interface.
-
- :param ref_obj: RoutingInstance object
- :param ref_data: PolicyBasedForwardingRuleType object
-
- """
- self._pending_field_updates.add('routing_instance_refs')
- self._pending_ref_updates.discard('routing_instance_refs')
- super(VirtualMachineInterface, self).set_routing_instance(*args, **kwargs)
-
- #end set_routing_instance
-
- def add_routing_instance(self, *args, **kwargs):
- """Add routing-instance to virtual-machine-interface.
-
- :param ref_obj: RoutingInstance object
- :param ref_data: PolicyBasedForwardingRuleType object
-
- """
- if 'routing_instance_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('routing_instance_refs')
- self._original_routing_instance_refs = (self.get_routing_instance_refs() or [])[:]
- super(VirtualMachineInterface, self).add_routing_instance(*args, **kwargs)
- #end add_routing_instance
-
- def del_routing_instance(self, *args, **kwargs):
- if 'routing_instance_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('routing_instance_refs')
- self._original_routing_instance_refs = (self.get_routing_instance_refs() or [])[:]
- super(VirtualMachineInterface, self).del_routing_instance(*args, **kwargs)
- #end del_routing_instance
-
- def set_routing_instance_list(self, *args, **kwargs):
- """Set routing-instance list for virtual-machine-interface.
-
- :param ref_obj_list: list of RoutingInstance object
- :param ref_data_list: list of PolicyBasedForwardingRuleType summary
-
- """
- self._pending_field_updates.add('routing_instance_refs')
- self._pending_ref_updates.discard('routing_instance_refs')
- super(VirtualMachineInterface, self).set_routing_instance_list(*args, **kwargs)
- #end set_routing_instance_list
-
- def set_interface_route_table(self, *args, **kwargs):
- """Set interface-route-table for virtual-machine-interface.
-
- :param ref_obj: InterfaceRouteTable object
-
- """
- self._pending_field_updates.add('interface_route_table_refs')
- self._pending_ref_updates.discard('interface_route_table_refs')
- super(VirtualMachineInterface, self).set_interface_route_table(*args, **kwargs)
-
- #end set_interface_route_table
-
- def add_interface_route_table(self, *args, **kwargs):
- """Add interface-route-table to virtual-machine-interface.
-
- :param ref_obj: InterfaceRouteTable object
-
- """
- if 'interface_route_table_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('interface_route_table_refs')
- self._original_interface_route_table_refs = (self.get_interface_route_table_refs() or [])[:]
- super(VirtualMachineInterface, self).add_interface_route_table(*args, **kwargs)
- #end add_interface_route_table
-
- def del_interface_route_table(self, *args, **kwargs):
- if 'interface_route_table_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('interface_route_table_refs')
- self._original_interface_route_table_refs = (self.get_interface_route_table_refs() or [])[:]
- super(VirtualMachineInterface, self).del_interface_route_table(*args, **kwargs)
- #end del_interface_route_table
-
- def set_interface_route_table_list(self, *args, **kwargs):
- """Set interface-route-table list for virtual-machine-interface.
-
- :param ref_obj_list: list of InterfaceRouteTable object
-
- """
- self._pending_field_updates.add('interface_route_table_refs')
- self._pending_ref_updates.discard('interface_route_table_refs')
- super(VirtualMachineInterface, self).set_interface_route_table_list(*args, **kwargs)
- #end set_interface_route_table_list
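
The add_*/del_*/set_*_list mutators deleted above all share one bookkeeping scheme: set_* and set_*_list mark the whole reference field dirty in _pending_field_updates, while add_*/del_* record an incremental edit in _pending_ref_updates and snapshot the original list so a later update can compute the delta. A minimal standalone sketch of that scheme (the toy class and names are illustrative, not part of the removed file):

    class _RefTracker(object):
        """Toy object tracking reference edits the way the removed
        VirtualMachineInterface wrapper does; names are illustrative only."""

        def __init__(self):
            self._pending_field_updates = set()
            self._pending_ref_updates = set()
            self._refs = []                    # current reference list
            self._original_refs = None         # snapshot taken on first add/del

        def set_ref_list(self, refs):
            # full replacement: the whole list will be sent on the next update
            self._pending_field_updates.add('refs')
            self._pending_ref_updates.discard('refs')
            self._refs = list(refs)

        def add_ref(self, ref):
            # incremental change: remember the original list once, then append
            if 'refs' not in self._pending_ref_updates | self._pending_field_updates:
                self._pending_ref_updates.add('refs')
                self._original_refs = self._refs[:]
            self._refs.append(ref)

        def del_ref(self, ref):
            if 'refs' not in self._pending_ref_updates:
                self._pending_ref_updates.add('refs')
                self._original_refs = self._refs[:]
            self._refs.remove(ref)

    if __name__ == '__main__':
        t = _RefTracker()
        t.add_ref('ri-1')
        assert t._pending_ref_updates == {'refs'} and t._original_refs == []
        t.set_ref_list(['ri-2'])
        assert t._pending_field_updates == {'refs'} and t._pending_ref_updates == set()

Keeping the original list around only for add/del is what lets an update path choose between a cheap delta (ref add/delete) and a full field replacement.
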
-
-
- def get_virtual_machine_interface_back_refs(self):
- """Return list of all virtual-machine-interfaces using this virtual-machine-interface"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_machine_interface_read(id = self.uuid, fields = ['virtual_machine_interface_back_refs'])
- back_refs = getattr(obj, 'virtual_machine_interface_back_refs', None)
- self.virtual_machine_interface_back_refs = back_refs
-
- return back_refs
- #end get_virtual_machine_interface_back_refs
-
- def get_instance_ip_back_refs(self):
- """Return list of all instance-ips using this virtual-machine-interface"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_machine_interface_read(id = self.uuid, fields = ['instance_ip_back_refs'])
- back_refs = getattr(obj, 'instance_ip_back_refs', None)
- self.instance_ip_back_refs = back_refs
-
- return back_refs
- #end get_instance_ip_back_refs
-
- def get_subnet_back_refs(self):
- """Return list of all subnets using this virtual-machine-interface"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_machine_interface_read(id = self.uuid, fields = ['subnet_back_refs'])
- back_refs = getattr(obj, 'subnet_back_refs', None)
- self.subnet_back_refs = back_refs
-
- return back_refs
- #end get_subnet_back_refs
-
- def get_floating_ip_back_refs(self):
- """Return list of all floating-ips using this virtual-machine-interface"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_machine_interface_read(id = self.uuid, fields = ['floating_ip_back_refs'])
- back_refs = getattr(obj, 'floating_ip_back_refs', None)
- self.floating_ip_back_refs = back_refs
-
- return back_refs
- #end get_floating_ip_back_refs
-
- def get_logical_interface_back_refs(self):
- """Return list of all logical-interfaces using this virtual-machine-interface"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_machine_interface_read(id = self.uuid, fields = ['logical_interface_back_refs'])
- back_refs = getattr(obj, 'logical_interface_back_refs', None)
- self.logical_interface_back_refs = back_refs
-
- return back_refs
- #end get_logical_interface_back_refs
-
- def get_customer_attachment_back_refs(self):
- """Return list of all customer-attachments using this virtual-machine-interface"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_machine_interface_read(id = self.uuid, fields = ['customer_attachment_back_refs'])
- back_refs = getattr(obj, 'customer_attachment_back_refs', None)
- self.customer_attachment_back_refs = back_refs
-
- return back_refs
- #end get_customer_attachment_back_refs
-
- def get_logical_router_back_refs(self):
- """Return list of all logical-routers using this virtual-machine-interface"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_machine_interface_read(id = self.uuid, fields = ['logical_router_back_refs'])
- back_refs = getattr(obj, 'logical_router_back_refs', None)
- self.logical_router_back_refs = back_refs
-
- return back_refs
- #end get_logical_router_back_refs
-
- def get_loadbalancer_pool_back_refs(self):
- """Return list of all loadbalancer-pools using this virtual-machine-interface"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_machine_interface_read(id = self.uuid, fields = ['loadbalancer_pool_back_refs'])
- back_refs = getattr(obj, 'loadbalancer_pool_back_refs', None)
- self.loadbalancer_pool_back_refs = back_refs
-
- return back_refs
- #end get_loadbalancer_pool_back_refs
-
- def get_virtual_ip_back_refs(self):
- """Return list of all virtual-ips using this virtual-machine-interface"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_machine_interface_read(id = self.uuid, fields = ['virtual_ip_back_refs'])
- back_refs = getattr(obj, 'virtual_ip_back_refs', None)
- self.virtual_ip_back_refs = back_refs
-
- return back_refs
- #end get_virtual_ip_back_refs
-
-#end class VirtualMachineInterface
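
Every get_*_back_refs method in the removed VirtualMachineInterface wrapper has the same shape: return None when the object has no server connection, otherwise issue one field-restricted read, cache the result on the object, and return it. A self-contained approximation, with a stub standing in for the real API handle (the stub and reader class below are hypothetical):

    class _StubConn(object):
        """Stand-in for the API handle; the removed code calls
        svr_conn.virtual_machine_interface_read(id=..., fields=[...])."""
        def virtual_machine_interface_read(self, id, fields):
            class _Obj(object):
                instance_ip_back_refs = [{'to': ['iip1'], 'uuid': 'iip-uuid'}]
            return _Obj()

    class _BackRefReader(object):
        def __init__(self, uuid, server_conn=None):
            self.uuid = uuid
            self._server_conn = server_conn
            self.instance_ip_back_refs = None

        def get_instance_ip_back_refs(self):
            if not self._server_conn:   # object never created/read via the library
                return None
            obj = self._server_conn.virtual_machine_interface_read(
                id=self.uuid, fields=['instance_ip_back_refs'])
            back_refs = getattr(obj, 'instance_ip_back_refs', None)
            self.instance_ip_back_refs = back_refs   # cache on the object
            return back_refs

    if __name__ == '__main__':
        print(_BackRefReader('vmi-uuid').get_instance_ip_back_refs())              # None
        print(_BackRefReader('vmi-uuid', _StubConn()).get_instance_ip_back_refs())
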
-
-class LoadbalancerHealthmonitor(vnc_api.gen.resource_common.LoadbalancerHealthmonitor):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, loadbalancer_healthmonitor_properties = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if loadbalancer_healthmonitor_properties:
- pending_fields.append('loadbalancer_healthmonitor_properties')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(LoadbalancerHealthmonitor, self).__init__(name, parent_obj, loadbalancer_healthmonitor_properties, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'loadbalancer_healthmonitor_properties' in kwargs:
- props_dict['loadbalancer_healthmonitor_properties'] = vnc_api.gen.resource_xsd.LoadbalancerHealthmonitorType(**kwargs['loadbalancer_healthmonitor_properties'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = LoadbalancerHealthmonitor(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
-
- # and back references but no obj api for it...
- if 'loadbalancer_pool_back_refs' in kwargs:
- obj.loadbalancer_pool_back_refs = kwargs['loadbalancer_pool_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.LoadbalancerHealthmonitor.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.LoadbalancerHealthmonitor.loadbalancer_healthmonitor_properties.setter
- def loadbalancer_healthmonitor_properties(self, loadbalancer_healthmonitor_properties):
- """Set loadbalancer-healthmonitor-properties for loadbalancer-healthmonitor.
-
- :param loadbalancer_healthmonitor_properties: LoadbalancerHealthmonitorType object
-
- """
- if 'loadbalancer_healthmonitor_properties' not in self._pending_field_updates:
- self._pending_field_updates.add('loadbalancer_healthmonitor_properties')
-
- self._loadbalancer_healthmonitor_properties = loadbalancer_healthmonitor_properties
- #end loadbalancer_healthmonitor_properties
-
- def set_loadbalancer_healthmonitor_properties(self, value):
- self.loadbalancer_healthmonitor_properties = value
- #end set_loadbalancer_healthmonitor_properties
-
- @vnc_api.gen.resource_common.LoadbalancerHealthmonitor.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for loadbalancer-healthmonitor.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.LoadbalancerHealthmonitor.display_name.setter
- def display_name(self, display_name):
- """Set display-name for loadbalancer-healthmonitor.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
-
- def get_loadbalancer_pool_back_refs(self):
- """Return list of all loadbalancer-pools using this loadbalancer-healthmonitor"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.loadbalancer_healthmonitor_read(id = self.uuid, fields = ['loadbalancer_pool_back_refs'])
- back_refs = getattr(obj, 'loadbalancer_pool_back_refs', None)
- self.loadbalancer_pool_back_refs = back_refs
-
- return back_refs
- #end get_loadbalancer_pool_back_refs
-
-#end class LoadbalancerHealthmonitor
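
The property setters in the removed wrappers (display_name, id_perms, loadbalancer_healthmonitor_properties, and so on) record the field name in _pending_field_updates before storing the value, so a later update knows exactly which properties changed. A minimal sketch of that setter pattern, assuming nothing beyond the standard library:

    class _DirtyTracking(object):
        def __init__(self):
            self._pending_field_updates = set()
            self._display_name = None

        @property
        def display_name(self):
            return self._display_name

        @display_name.setter
        def display_name(self, value):
            # mark the field dirty before storing the new value
            if 'display_name' not in self._pending_field_updates:
                self._pending_field_updates.add('display_name')
            self._display_name = value

    if __name__ == '__main__':
        o = _DirtyTracking()
        o.display_name = 'monitor-1'
        assert o._pending_field_updates == {'display_name'}
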
-
-class VirtualNetwork(vnc_api.gen.resource_common.VirtualNetwork):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, virtual_network_properties = None, virtual_network_network_id = None, route_target_list = None, router_external = None, is_shared = None, external_ipam = None, flood_unknown_unicast = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if virtual_network_properties:
- pending_fields.append('virtual_network_properties')
- if virtual_network_network_id:
- pending_fields.append('virtual_network_network_id')
- if route_target_list:
- pending_fields.append('route_target_list')
- if router_external:
- pending_fields.append('router_external')
- if is_shared:
- pending_fields.append('is_shared')
- if external_ipam:
- pending_fields.append('external_ipam')
- if flood_unknown_unicast:
- pending_fields.append('flood_unknown_unicast')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(VirtualNetwork, self).__init__(name, parent_obj, virtual_network_properties, virtual_network_network_id, route_target_list, router_external, is_shared, external_ipam, flood_unknown_unicast, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'virtual_network_properties' in kwargs:
- props_dict['virtual_network_properties'] = vnc_api.gen.resource_xsd.VirtualNetworkType(**kwargs['virtual_network_properties'])
- if 'virtual_network_network_id' in kwargs:
- props_dict['virtual_network_network_id'] = kwargs['virtual_network_network_id']
- if 'route_target_list' in kwargs:
- props_dict['route_target_list'] = vnc_api.gen.resource_xsd.RouteTargetList(**kwargs['route_target_list'])
- if 'router_external' in kwargs:
- props_dict['router_external'] = kwargs['router_external']
- if 'is_shared' in kwargs:
- props_dict['is_shared'] = kwargs['is_shared']
- if 'external_ipam' in kwargs:
- props_dict['external_ipam'] = kwargs['external_ipam']
- if 'flood_unknown_unicast' in kwargs:
- props_dict['flood_unknown_unicast'] = kwargs['flood_unknown_unicast']
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = VirtualNetwork(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
- if 'access_control_lists' in kwargs:
- obj.access_control_lists = kwargs['access_control_lists']
- if 'floating_ip_pools' in kwargs:
- obj.floating_ip_pools = kwargs['floating_ip_pools']
- if 'routing_instances' in kwargs:
- obj.routing_instances = kwargs['routing_instances']
-
- # add any specified references...
- if 'qos_forwarding_class_refs' in kwargs:
- obj.qos_forwarding_class_refs = kwargs['qos_forwarding_class_refs']
- if 'network_ipam_refs' in kwargs:
- obj.network_ipam_refs = kwargs['network_ipam_refs']
- for ref in obj.network_ipam_refs:
- ref['attr'] = vnc_api.gen.resource_xsd.VnSubnetsType(**ref['attr'])
- if 'network_policy_refs' in kwargs:
- obj.network_policy_refs = kwargs['network_policy_refs']
- for ref in obj.network_policy_refs:
- ref['attr'] = vnc_api.gen.resource_xsd.VirtualNetworkPolicyType(**ref['attr'])
- if 'route_table_refs' in kwargs:
- obj.route_table_refs = kwargs['route_table_refs']
-
- # and back references but no obj api for it...
- if 'virtual_machine_interface_back_refs' in kwargs:
- obj.virtual_machine_interface_back_refs = kwargs['virtual_machine_interface_back_refs']
- if 'instance_ip_back_refs' in kwargs:
- obj.instance_ip_back_refs = kwargs['instance_ip_back_refs']
- if 'physical_router_back_refs' in kwargs:
- obj.physical_router_back_refs = kwargs['physical_router_back_refs']
- if 'logical_router_back_refs' in kwargs:
- obj.logical_router_back_refs = kwargs['logical_router_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.VirtualNetwork.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.VirtualNetwork.virtual_network_properties.setter
- def virtual_network_properties(self, virtual_network_properties):
- """Set virtual-network-properties for virtual-network.
-
- :param virtual_network_properties: VirtualNetworkType object
-
- """
- if 'virtual_network_properties' not in self._pending_field_updates:
- self._pending_field_updates.add('virtual_network_properties')
-
- self._virtual_network_properties = virtual_network_properties
- #end virtual_network_properties
-
- def set_virtual_network_properties(self, value):
- self.virtual_network_properties = value
- #end set_virtual_network_properties
-
- @vnc_api.gen.resource_common.VirtualNetwork.virtual_network_network_id.setter
- def virtual_network_network_id(self, virtual_network_network_id):
- """Set virtual-network-network-id for virtual-network.
-
- :param virtual_network_network_id: xsd:integer object
-
- """
- if 'virtual_network_network_id' not in self._pending_field_updates:
- self._pending_field_updates.add('virtual_network_network_id')
-
- self._virtual_network_network_id = virtual_network_network_id
- #end virtual_network_network_id
-
- def set_virtual_network_network_id(self, value):
- self.virtual_network_network_id = value
- #end set_virtual_network_network_id
-
- @vnc_api.gen.resource_common.VirtualNetwork.route_target_list.setter
- def route_target_list(self, route_target_list):
- """Set route-target-list for virtual-network.
-
- :param route_target_list: RouteTargetList object
-
- """
- if 'route_target_list' not in self._pending_field_updates:
- self._pending_field_updates.add('route_target_list')
-
- self._route_target_list = route_target_list
- #end route_target_list
-
- def set_route_target_list(self, value):
- self.route_target_list = value
- #end set_route_target_list
-
- @vnc_api.gen.resource_common.VirtualNetwork.router_external.setter
- def router_external(self, router_external):
- """Set router-external for virtual-network.
-
- :param router_external: xsd:boolean object
-
- """
- if 'router_external' not in self._pending_field_updates:
- self._pending_field_updates.add('router_external')
-
- self._router_external = router_external
- #end router_external
-
- def set_router_external(self, value):
- self.router_external = value
- #end set_router_external
-
- @vnc_api.gen.resource_common.VirtualNetwork.is_shared.setter
- def is_shared(self, is_shared):
- """Set is-shared for virtual-network.
-
- :param is_shared: xsd:boolean object
-
- """
- if 'is_shared' not in self._pending_field_updates:
- self._pending_field_updates.add('is_shared')
-
- self._is_shared = is_shared
- #end is_shared
-
- def set_is_shared(self, value):
- self.is_shared = value
- #end set_is_shared
-
- @vnc_api.gen.resource_common.VirtualNetwork.external_ipam.setter
- def external_ipam(self, external_ipam):
- """Set external-ipam for virtual-network.
-
- :param external_ipam: xsd:boolean object
-
- """
- if 'external_ipam' not in self._pending_field_updates:
- self._pending_field_updates.add('external_ipam')
-
- self._external_ipam = external_ipam
- #end external_ipam
-
- def set_external_ipam(self, value):
- self.external_ipam = value
- #end set_external_ipam
-
- @vnc_api.gen.resource_common.VirtualNetwork.flood_unknown_unicast.setter
- def flood_unknown_unicast(self, flood_unknown_unicast):
- """Set flood-unknown-unicast for virtual-network.
-
- :param flood_unknown_unicast: xsd:boolean object
-
- """
- if 'flood_unknown_unicast' not in self._pending_field_updates:
- self._pending_field_updates.add('flood_unknown_unicast')
-
- self._flood_unknown_unicast = flood_unknown_unicast
- #end flood_unknown_unicast
-
- def set_flood_unknown_unicast(self, value):
- self.flood_unknown_unicast = value
- #end set_flood_unknown_unicast
-
- @vnc_api.gen.resource_common.VirtualNetwork.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for virtual-network.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.VirtualNetwork.display_name.setter
- def display_name(self, display_name):
- """Set display-name for virtual-network.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_qos_forwarding_class(self, *args, **kwargs):
- """Set qos-forwarding-class for virtual-network.
-
- :param ref_obj: QosForwardingClass object
-
- """
- self._pending_field_updates.add('qos_forwarding_class_refs')
- self._pending_ref_updates.discard('qos_forwarding_class_refs')
- super(VirtualNetwork, self).set_qos_forwarding_class(*args, **kwargs)
-
- #end set_qos_forwarding_class
-
- def add_qos_forwarding_class(self, *args, **kwargs):
- """Add qos-forwarding-class to virtual-network.
-
- :param ref_obj: QosForwardingClass object
-
- """
- if 'qos_forwarding_class_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('qos_forwarding_class_refs')
- self._original_qos_forwarding_class_refs = (self.get_qos_forwarding_class_refs() or [])[:]
- super(VirtualNetwork, self).add_qos_forwarding_class(*args, **kwargs)
- #end add_qos_forwarding_class
-
- def del_qos_forwarding_class(self, *args, **kwargs):
- if 'qos_forwarding_class_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('qos_forwarding_class_refs')
- self._original_qos_forwarding_class_refs = (self.get_qos_forwarding_class_refs() or [])[:]
- super(VirtualNetwork, self).del_qos_forwarding_class(*args, **kwargs)
- #end del_qos_forwarding_class
-
- def set_qos_forwarding_class_list(self, *args, **kwargs):
- """Set qos-forwarding-class list for virtual-network.
-
- :param ref_obj_list: list of QosForwardingClass object
-
- """
- self._pending_field_updates.add('qos_forwarding_class_refs')
- self._pending_ref_updates.discard('qos_forwarding_class_refs')
- super(VirtualNetwork, self).set_qos_forwarding_class_list(*args, **kwargs)
- #end set_qos_forwarding_class_list
-
- def set_network_ipam(self, *args, **kwargs):
- """Set network-ipam for virtual-network.
-
- :param ref_obj: NetworkIpam object
- :param ref_data: VnSubnetsType object
-
- """
- self._pending_field_updates.add('network_ipam_refs')
- self._pending_ref_updates.discard('network_ipam_refs')
- super(VirtualNetwork, self).set_network_ipam(*args, **kwargs)
-
- #end set_network_ipam
-
- def add_network_ipam(self, *args, **kwargs):
- """Add network-ipam to virtual-network.
-
- :param ref_obj: NetworkIpam object
- :param ref_data: VnSubnetsType object
-
- """
- if 'network_ipam_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('network_ipam_refs')
- self._original_network_ipam_refs = (self.get_network_ipam_refs() or [])[:]
- super(VirtualNetwork, self).add_network_ipam(*args, **kwargs)
- #end add_network_ipam
-
- def del_network_ipam(self, *args, **kwargs):
- if 'network_ipam_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('network_ipam_refs')
- self._original_network_ipam_refs = (self.get_network_ipam_refs() or [])[:]
- super(VirtualNetwork, self).del_network_ipam(*args, **kwargs)
- #end del_network_ipam
-
- def set_network_ipam_list(self, *args, **kwargs):
- """Set network-ipam list for virtual-network.
-
- :param ref_obj_list: list of NetworkIpam object
- :param ref_data_list: list of VnSubnetsType summary
-
- """
- self._pending_field_updates.add('network_ipam_refs')
- self._pending_ref_updates.discard('network_ipam_refs')
- super(VirtualNetwork, self).set_network_ipam_list(*args, **kwargs)
- #end set_network_ipam_list
-
- def set_network_policy(self, *args, **kwargs):
- """Set network-policy for virtual-network.
-
- :param ref_obj: NetworkPolicy object
- :param ref_data: VirtualNetworkPolicyType object
-
- """
- self._pending_field_updates.add('network_policy_refs')
- self._pending_ref_updates.discard('network_policy_refs')
- super(VirtualNetwork, self).set_network_policy(*args, **kwargs)
-
- #end set_network_policy
-
- def add_network_policy(self, *args, **kwargs):
- """Add network-policy to virtual-network.
-
- :param ref_obj: NetworkPolicy object
- :param ref_data: VirtualNetworkPolicyType object
-
- """
- if 'network_policy_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('network_policy_refs')
- self._original_network_policy_refs = (self.get_network_policy_refs() or [])[:]
- super(VirtualNetwork, self).add_network_policy(*args, **kwargs)
- #end add_network_policy
-
- def del_network_policy(self, *args, **kwargs):
- if 'network_policy_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('network_policy_refs')
- self._original_network_policy_refs = (self.get_network_policy_refs() or [])[:]
- super(VirtualNetwork, self).del_network_policy(*args, **kwargs)
- #end del_network_policy
-
- def set_network_policy_list(self, *args, **kwargs):
- """Set network-policy list for virtual-network.
-
- :param ref_obj_list: list of NetworkPolicy object
- :param ref_data_list: list of VirtualNetworkPolicyType summary
-
- """
- self._pending_field_updates.add('network_policy_refs')
- self._pending_ref_updates.discard('network_policy_refs')
- super(VirtualNetwork, self).set_network_policy_list(*args, **kwargs)
- #end set_network_policy_list
-
- def set_route_table(self, *args, **kwargs):
- """Set route-table for virtual-network.
-
- :param ref_obj: RouteTable object
-
- """
- self._pending_field_updates.add('route_table_refs')
- self._pending_ref_updates.discard('route_table_refs')
- super(VirtualNetwork, self).set_route_table(*args, **kwargs)
-
- #end set_route_table
-
- def add_route_table(self, *args, **kwargs):
- """Add route-table to virtual-network.
-
- :param ref_obj: RouteTable object
-
- """
- if 'route_table_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('route_table_refs')
- self._original_route_table_refs = (self.get_route_table_refs() or [])[:]
- super(VirtualNetwork, self).add_route_table(*args, **kwargs)
- #end add_route_table
-
- def del_route_table(self, *args, **kwargs):
- if 'route_table_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('route_table_refs')
- self._original_route_table_refs = (self.get_route_table_refs() or [])[:]
- super(VirtualNetwork, self).del_route_table(*args, **kwargs)
- #end del_route_table
-
- def set_route_table_list(self, *args, **kwargs):
- """Set route-table list for virtual-network.
-
- :param ref_obj_list: list of RouteTable object
-
- """
- self._pending_field_updates.add('route_table_refs')
- self._pending_ref_updates.discard('route_table_refs')
- super(VirtualNetwork, self).set_route_table_list(*args, **kwargs)
- #end set_route_table_list
-
- def get_access_control_lists(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(VirtualNetwork, self).get_access_control_lists()
- if not children: # read it for first time
- obj = svr_conn.virtual_network_read(id = self.uuid, fields = ['access_control_lists'])
- children = getattr(obj, 'access_control_lists', None)
- self.access_control_lists = children
-
- return children
- #end get_access_control_lists
-
- def get_floating_ip_pools(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(VirtualNetwork, self).get_floating_ip_pools()
- if not children: # read it for first time
- obj = svr_conn.virtual_network_read(id = self.uuid, fields = ['floating_ip_pools'])
- children = getattr(obj, 'floating_ip_pools', None)
- self.floating_ip_pools = children
-
- return children
- #end get_floating_ip_pools
-
- def get_routing_instances(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(VirtualNetwork, self).get_routing_instances()
- if not children: # read it for first time
- obj = svr_conn.virtual_network_read(id = self.uuid, fields = ['routing_instances'])
- children = getattr(obj, 'routing_instances', None)
- self.routing_instances = children
-
- return children
- #end get_routing_instances
-
-
- def get_virtual_machine_interface_back_refs(self):
- """Return list of all virtual-machine-interfaces using this virtual-network"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_network_read(id = self.uuid, fields = ['virtual_machine_interface_back_refs'])
- back_refs = getattr(obj, 'virtual_machine_interface_back_refs', None)
- self.virtual_machine_interface_back_refs = back_refs
-
- return back_refs
- #end get_virtual_machine_interface_back_refs
-
- def get_instance_ip_back_refs(self):
- """Return list of all instance-ips using this virtual-network"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_network_read(id = self.uuid, fields = ['instance_ip_back_refs'])
- back_refs = getattr(obj, 'instance_ip_back_refs', None)
- self.instance_ip_back_refs = back_refs
-
- return back_refs
- #end get_instance_ip_back_refs
-
- def get_physical_router_back_refs(self):
- """Return list of all physical-routers using this virtual-network"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_network_read(id = self.uuid, fields = ['physical_router_back_refs'])
- back_refs = getattr(obj, 'physical_router_back_refs', None)
- self.physical_router_back_refs = back_refs
-
- return back_refs
- #end get_physical_router_back_refs
-
- def get_logical_router_back_refs(self):
- """Return list of all logical-routers using this virtual-network"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.virtual_network_read(id = self.uuid, fields = ['logical_router_back_refs'])
- back_refs = getattr(obj, 'logical_router_back_refs', None)
- self.logical_router_back_refs = back_refs
-
- return back_refs
- #end get_logical_router_back_refs
-
-#end class VirtualNetwork
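
from_dict() in the removed VirtualNetwork wrapper rehydrates a plain JSON-style dict into an object: nested property dicts are wrapped in their generated *Type classes and each reference's 'attr' payload is rewrapped in place. A self-contained approximation of that flow (the _Net and _SubnetsType classes below are stand-ins, not the real generated types):

    class _SubnetsType(object):
        """Stand-in for a generated attribute type such as VnSubnetsType."""
        def __init__(self, ipam_subnets=None):
            self.ipam_subnets = ipam_subnets or []

    class _Net(object):
        def __init__(self, name, fq_name, parent_type=None):
            self.name, self.fq_name, self.parent_type = name, fq_name, parent_type
            self.uuid = None
            self.network_ipam_refs = None

        @classmethod
        def from_dict(cls, **kwargs):
            fq_name = kwargs['fq_name']
            obj = cls(fq_name[-1], fq_name, kwargs.get('parent_type'))
            obj.uuid = kwargs['uuid']
            if 'network_ipam_refs' in kwargs:
                obj.network_ipam_refs = kwargs['network_ipam_refs']
                for ref in obj.network_ipam_refs:     # rewrap the ref attribute
                    ref['attr'] = _SubnetsType(**ref['attr'])
            return obj

    if __name__ == '__main__':
        net = _Net.from_dict(
            fq_name=['default-domain', 'demo', 'net1'], uuid='vn-uuid',
            parent_type='project',
            network_ipam_refs=[{'to': ['default-domain', 'demo', 'ipam1'],
                                'attr': {'ipam_subnets': [{'subnet': '10.0.0.0/24'}]}}])
        print(net.network_ipam_refs[0]['attr'].ipam_subnets)
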
-
-class Project(vnc_api.gen.resource_common.Project):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, quota = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if quota:
- pending_fields.append('quota')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(Project, self).__init__(name, parent_obj, quota, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'quota' in kwargs:
- props_dict['quota'] = vnc_api.gen.resource_xsd.QuotaType(**kwargs['quota'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = Project(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
- if 'security_groups' in kwargs:
- obj.security_groups = kwargs['security_groups']
- if 'virtual_networks' in kwargs:
- obj.virtual_networks = kwargs['virtual_networks']
- if 'qos_queues' in kwargs:
- obj.qos_queues = kwargs['qos_queues']
- if 'qos_forwarding_classs' in kwargs:
- obj.qos_forwarding_classs = kwargs['qos_forwarding_classs']
- if 'network_ipams' in kwargs:
- obj.network_ipams = kwargs['network_ipams']
- if 'network_policys' in kwargs:
- obj.network_policys = kwargs['network_policys']
- if 'virtual_machine_interfaces' in kwargs:
- obj.virtual_machine_interfaces = kwargs['virtual_machine_interfaces']
- if 'service_instances' in kwargs:
- obj.service_instances = kwargs['service_instances']
- if 'route_tables' in kwargs:
- obj.route_tables = kwargs['route_tables']
- if 'interface_route_tables' in kwargs:
- obj.interface_route_tables = kwargs['interface_route_tables']
- if 'logical_routers' in kwargs:
- obj.logical_routers = kwargs['logical_routers']
- if 'loadbalancer_pools' in kwargs:
- obj.loadbalancer_pools = kwargs['loadbalancer_pools']
- if 'loadbalancer_healthmonitors' in kwargs:
- obj.loadbalancer_healthmonitors = kwargs['loadbalancer_healthmonitors']
- if 'virtual_ips' in kwargs:
- obj.virtual_ips = kwargs['virtual_ips']
-
- # add any specified references...
- if 'namespace_refs' in kwargs:
- obj.namespace_refs = kwargs['namespace_refs']
- for ref in obj.namespace_refs:
- ref['attr'] = vnc_api.gen.resource_xsd.SubnetType(**ref['attr'])
- if 'floating_ip_pool_refs' in kwargs:
- obj.floating_ip_pool_refs = kwargs['floating_ip_pool_refs']
-
- # and back references but no obj api for it...
- if 'floating_ip_back_refs' in kwargs:
- obj.floating_ip_back_refs = kwargs['floating_ip_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.Project.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.Project.quota.setter
- def quota(self, quota):
- """Set quota for project.
-
- :param quota: QuotaType object
-
- """
- if 'quota' not in self._pending_field_updates:
- self._pending_field_updates.add('quota')
-
- self._quota = quota
- #end quota
-
- def set_quota(self, value):
- self.quota = value
- #end set_quota
-
- @vnc_api.gen.resource_common.Project.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for project.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.Project.display_name.setter
- def display_name(self, display_name):
- """Set display-name for project.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_namespace(self, *args, **kwargs):
- """Set namespace for project.
-
- :param ref_obj: Namespace object
- :param ref_data: SubnetType object
-
- """
- self._pending_field_updates.add('namespace_refs')
- self._pending_ref_updates.discard('namespace_refs')
- super(Project, self).set_namespace(*args, **kwargs)
-
- #end set_namespace
-
- def add_namespace(self, *args, **kwargs):
- """Add namespace to project.
-
- :param ref_obj: Namespace object
- :param ref_data: SubnetType object
-
- """
- if 'namespace_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('namespace_refs')
- self._original_namespace_refs = (self.get_namespace_refs() or [])[:]
- super(Project, self).add_namespace(*args, **kwargs)
- #end add_namespace
-
- def del_namespace(self, *args, **kwargs):
- if 'namespace_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('namespace_refs')
- self._original_namespace_refs = (self.get_namespace_refs() or [])[:]
- super(Project, self).del_namespace(*args, **kwargs)
- #end del_namespace
-
- def set_namespace_list(self, *args, **kwargs):
- """Set namespace list for project.
-
- :param ref_obj_list: list of Namespace object
- :param ref_data_list: list of SubnetType summary
-
- """
- self._pending_field_updates.add('namespace_refs')
- self._pending_ref_updates.discard('namespace_refs')
- super(Project, self).set_namespace_list(*args, **kwargs)
- #end set_namespace_list
-
- def set_floating_ip_pool(self, *args, **kwargs):
- """Set floating-ip-pool for project.
-
- :param ref_obj: FloatingIpPool object
-
- """
- self._pending_field_updates.add('floating_ip_pool_refs')
- self._pending_ref_updates.discard('floating_ip_pool_refs')
- super(Project, self).set_floating_ip_pool(*args, **kwargs)
-
- #end set_floating_ip_pool
-
- def add_floating_ip_pool(self, *args, **kwargs):
- """Add floating-ip-pool to project.
-
- :param ref_obj: FloatingIpPool object
-
- """
- if 'floating_ip_pool_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('floating_ip_pool_refs')
- self._original_floating_ip_pool_refs = (self.get_floating_ip_pool_refs() or [])[:]
- super(Project, self).add_floating_ip_pool(*args, **kwargs)
- #end add_floating_ip_pool
-
- def del_floating_ip_pool(self, *args, **kwargs):
- if 'floating_ip_pool_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('floating_ip_pool_refs')
- self._original_floating_ip_pool_refs = (self.get_floating_ip_pool_refs() or [])[:]
- super(Project, self).del_floating_ip_pool(*args, **kwargs)
- #end del_floating_ip_pool
-
- def set_floating_ip_pool_list(self, *args, **kwargs):
- """Set floating-ip-pool list for project.
-
- :param ref_obj_list: list of FloatingIpPool object
-
- """
- self._pending_field_updates.add('floating_ip_pool_refs')
- self._pending_ref_updates.discard('floating_ip_pool_refs')
- super(Project, self).set_floating_ip_pool_list(*args, **kwargs)
- #end set_floating_ip_pool_list
-
- def get_security_groups(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Project, self).get_security_groups()
- if not children: # read it for first time
- obj = svr_conn.project_read(id = self.uuid, fields = ['security_groups'])
- children = getattr(obj, 'security_groups', None)
- self.security_groups = children
-
- return children
- #end get_security_groups
-
- def get_virtual_networks(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Project, self).get_virtual_networks()
- if not children: # read it for first time
- obj = svr_conn.project_read(id = self.uuid, fields = ['virtual_networks'])
- children = getattr(obj, 'virtual_networks', None)
- self.virtual_networks = children
-
- return children
- #end get_virtual_networks
-
- def get_qos_queues(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Project, self).get_qos_queues()
- if not children: # read it for first time
- obj = svr_conn.project_read(id = self.uuid, fields = ['qos_queues'])
- children = getattr(obj, 'qos_queues', None)
- self.qos_queues = children
-
- return children
- #end get_qos_queues
-
- def get_qos_forwarding_classs(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Project, self).get_qos_forwarding_classs()
- if not children: # read it for first time
- obj = svr_conn.project_read(id = self.uuid, fields = ['qos_forwarding_classs'])
- children = getattr(obj, 'qos_forwarding_classs', None)
- self.qos_forwarding_classs = children
-
- return children
- #end get_qos_forwarding_classs
-
- def get_network_ipams(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Project, self).get_network_ipams()
- if not children: # read it for first time
- obj = svr_conn.project_read(id = self.uuid, fields = ['network_ipams'])
- children = getattr(obj, 'network_ipams', None)
- self.network_ipams = children
-
- return children
- #end get_network_ipams
-
- def get_network_policys(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Project, self).get_network_policys()
- if not children: # read it for first time
- obj = svr_conn.project_read(id = self.uuid, fields = ['network_policys'])
- children = getattr(obj, 'network_policys', None)
- self.network_policys = children
-
- return children
- #end get_network_policys
-
- def get_virtual_machine_interfaces(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Project, self).get_virtual_machine_interfaces()
- if not children: # read it for first time
- obj = svr_conn.project_read(id = self.uuid, fields = ['virtual_machine_interfaces'])
- children = getattr(obj, 'virtual_machine_interfaces', None)
- self.virtual_machine_interfaces = children
-
- return children
- #end get_virtual_machine_interfaces
-
- def get_service_instances(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Project, self).get_service_instances()
- if not children: # read it for first time
- obj = svr_conn.project_read(id = self.uuid, fields = ['service_instances'])
- children = getattr(obj, 'service_instances', None)
- self.service_instances = children
-
- return children
- #end get_service_instances
-
- def get_route_tables(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Project, self).get_route_tables()
- if not children: # read it for first time
- obj = svr_conn.project_read(id = self.uuid, fields = ['route_tables'])
- children = getattr(obj, 'route_tables', None)
- self.route_tables = children
-
- return children
- #end get_route_tables
-
- def get_interface_route_tables(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Project, self).get_interface_route_tables()
- if not children: # read it for first time
- obj = svr_conn.project_read(id = self.uuid, fields = ['interface_route_tables'])
- children = getattr(obj, 'interface_route_tables', None)
- self.interface_route_tables = children
-
- return children
- #end get_interface_route_tables
-
- def get_logical_routers(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Project, self).get_logical_routers()
- if not children: # read it for first time
- obj = svr_conn.project_read(id = self.uuid, fields = ['logical_routers'])
- children = getattr(obj, 'logical_routers', None)
- self.logical_routers = children
-
- return children
- #end get_logical_routers
-
- def get_loadbalancer_pools(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Project, self).get_loadbalancer_pools()
- if not children: # read it for first time
- obj = svr_conn.project_read(id = self.uuid, fields = ['loadbalancer_pools'])
- children = getattr(obj, 'loadbalancer_pools', None)
- self.loadbalancer_pools = children
-
- return children
- #end get_loadbalancer_pools
-
- def get_loadbalancer_healthmonitors(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Project, self).get_loadbalancer_healthmonitors()
- if not children: # read it for first time
- obj = svr_conn.project_read(id = self.uuid, fields = ['loadbalancer_healthmonitors'])
- children = getattr(obj, 'loadbalancer_healthmonitors', None)
- self.loadbalancer_healthmonitors = children
-
- return children
- #end get_loadbalancer_healthmonitors
-
- def get_virtual_ips(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(Project, self).get_virtual_ips()
- if not children: # read it for first time
- obj = svr_conn.project_read(id = self.uuid, fields = ['virtual_ips'])
- children = getattr(obj, 'virtual_ips', None)
- self.virtual_ips = children
-
- return children
- #end get_virtual_ips
-
-
- def get_floating_ip_back_refs(self):
- """Return list of all floating-ips using this project"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.project_read(id = self.uuid, fields = ['floating_ip_back_refs'])
- back_refs = getattr(obj, 'floating_ip_back_refs', None)
- self.floating_ip_back_refs = back_refs
-
- return back_refs
- #end get_floating_ip_back_refs
-
-#end class Project
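
The child accessors in the removed Project wrapper (get_virtual_networks, get_security_groups, and the rest) return the locally cached child summary when present and otherwise perform a single field-restricted project_read, caching the result for later calls. A sketch of that read-once behaviour with a hypothetical stub connection:

    class _StubProjectConn(object):
        def __init__(self):
            self.reads = 0
        def project_read(self, id, fields):
            self.reads += 1
            class _Obj(object):
                virtual_networks = [{'to': ['default-domain', 'demo', 'net1']}]
            return _Obj()

    class _ProjectChildren(object):
        def __init__(self, uuid, server_conn):
            self.uuid, self._server_conn = uuid, server_conn
            self.virtual_networks = None

        def get_virtual_networks(self):
            if not self._server_conn:
                return None
            if not self.virtual_networks:          # read it for the first time
                obj = self._server_conn.project_read(id=self.uuid,
                                                     fields=['virtual_networks'])
                self.virtual_networks = getattr(obj, 'virtual_networks', None)
            return self.virtual_networks

    if __name__ == '__main__':
        conn = _StubProjectConn()
        proj = _ProjectChildren('proj-uuid', conn)
        proj.get_virtual_networks()
        proj.get_virtual_networks()
        assert conn.reads == 1      # second call is served from the cache

Caching the child summary after the first read keeps repeated accessor calls from turning into repeated API round-trips.
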
-
-class QosForwardingClass(vnc_api.gen.resource_common.QosForwardingClass):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, dscp = None, trusted = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if dscp:
- pending_fields.append('dscp')
- if trusted:
- pending_fields.append('trusted')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(QosForwardingClass, self).__init__(name, parent_obj, dscp, trusted, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'dscp' in kwargs:
- props_dict['dscp'] = kwargs['dscp']
- if 'trusted' in kwargs:
- props_dict['trusted'] = kwargs['trusted']
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = QosForwardingClass(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
- if 'qos_queue_refs' in kwargs:
- obj.qos_queue_refs = kwargs['qos_queue_refs']
-
- # and back references but no obj api for it...
- if 'virtual_network_back_refs' in kwargs:
- obj.virtual_network_back_refs = kwargs['virtual_network_back_refs']
- if 'virtual_machine_interface_back_refs' in kwargs:
- obj.virtual_machine_interface_back_refs = kwargs['virtual_machine_interface_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.QosForwardingClass.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.QosForwardingClass.dscp.setter
- def dscp(self, dscp):
- """Set dscp for qos-forwarding-class.
-
- :param dscp: xsd:integer object
-
- """
- if 'dscp' not in self._pending_field_updates:
- self._pending_field_updates.add('dscp')
-
- self._dscp = dscp
- #end dscp
-
- def set_dscp(self, value):
- self.dscp = value
- #end set_dscp
-
- @vnc_api.gen.resource_common.QosForwardingClass.trusted.setter
- def trusted(self, trusted):
- """Set trusted for qos-forwarding-class.
-
- :param trusted: xsd:boolean object
-
- """
- if 'trusted' not in self._pending_field_updates:
- self._pending_field_updates.add('trusted')
-
- self._trusted = trusted
- #end trusted
-
- def set_trusted(self, value):
- self.trusted = value
- #end set_trusted
-
- @vnc_api.gen.resource_common.QosForwardingClass.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for qos-forwarding-class.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.QosForwardingClass.display_name.setter
- def display_name(self, display_name):
- """Set display-name for qos-forwarding-class.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_qos_queue(self, *args, **kwargs):
- """Set qos-queue for qos-forwarding-class.
-
- :param ref_obj: QosQueue object
-
- """
- self._pending_field_updates.add('qos_queue_refs')
- self._pending_ref_updates.discard('qos_queue_refs')
- super(QosForwardingClass, self).set_qos_queue(*args, **kwargs)
-
- #end set_qos_queue
-
- def add_qos_queue(self, *args, **kwargs):
- """Add qos-queue to qos-forwarding-class.
-
- :param ref_obj: QosQueue object
-
- """
- if 'qos_queue_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('qos_queue_refs')
- self._original_qos_queue_refs = (self.get_qos_queue_refs() or [])[:]
- super(QosForwardingClass, self).add_qos_queue(*args, **kwargs)
- #end add_qos_queue
-
- def del_qos_queue(self, *args, **kwargs):
- if 'qos_queue_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('qos_queue_refs')
- self._original_qos_queue_refs = (self.get_qos_queue_refs() or [])[:]
- super(QosForwardingClass, self).del_qos_queue(*args, **kwargs)
- #end del_qos_queue
-
- def set_qos_queue_list(self, *args, **kwargs):
- """Set qos-queue list for qos-forwarding-class.
-
- :param ref_obj_list: list of QosQueue object
-
- """
- self._pending_field_updates.add('qos_queue_refs')
- self._pending_ref_updates.discard('qos_queue_refs')
- super(QosForwardingClass, self).set_qos_queue_list(*args, **kwargs)
- #end set_qos_queue_list
-
-
- def get_virtual_network_back_refs(self):
- """Return list of all virtual-networks using this qos-forwarding-class"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.qos_forwarding_class_read(id = self.uuid, fields = ['virtual_network_back_refs'])
- back_refs = getattr(obj, 'virtual_network_back_refs', None)
- self.virtual_network_back_refs = back_refs
-
- return back_refs
- #end get_virtual_network_back_refs
-
- def get_virtual_machine_interface_back_refs(self):
- """Return list of all virtual-machine-interfaces using this qos-forwarding-class"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.qos_forwarding_class_read(id = self.uuid, fields = ['virtual_machine_interface_back_refs'])
- back_refs = getattr(obj, 'virtual_machine_interface_back_refs', None)
- self.virtual_machine_interface_back_refs = back_refs
-
- return back_refs
- #end get_virtual_machine_interface_back_refs
-
-#end class QosForwardingClass
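
Each removed wrapper also exposes get_pending_updates(), get_ref_updates() and clear_pending_updates(); a caller can use the trio to send only dirty state and then reset the bookkeeping. The flush() helper below is purely hypothetical, shown only to illustrate how the three accessors fit together:

    class _PendingState(object):
        def __init__(self):
            self._pending_field_updates = {'dscp', 'display_name'}
            self._pending_ref_updates = {'qos_queue_refs'}

        def get_pending_updates(self):
            return self._pending_field_updates

        def get_ref_updates(self):
            return self._pending_ref_updates

        def clear_pending_updates(self):
            self._pending_field_updates = set()
            self._pending_ref_updates = set()

    def flush(obj):
        """Pretend-flush: report what a real client would send, then clear it."""
        print('fields to send: %s' % sorted(obj.get_pending_updates()))
        print('refs to update: %s' % sorted(obj.get_ref_updates()))
        obj.clear_pending_updates()

    if __name__ == '__main__':
        state = _PendingState()
        flush(state)
        assert not state.get_pending_updates() and not state.get_ref_updates()
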
-
-class DatabaseNode(vnc_api.gen.resource_common.DatabaseNode):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, database_node_ip_address = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if database_node_ip_address:
- pending_fields.append('database_node_ip_address')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(DatabaseNode, self).__init__(name, parent_obj, database_node_ip_address, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'database_node_ip_address' in kwargs:
- props_dict['database_node_ip_address'] = kwargs['database_node_ip_address']
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = DatabaseNode(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.DatabaseNode.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.DatabaseNode.database_node_ip_address.setter
- def database_node_ip_address(self, database_node_ip_address):
- """Set database-node-ip-address for database-node.
-
- :param database_node_ip_address: IpAddressType object
-
- """
- if 'database_node_ip_address' not in self._pending_field_updates:
- self._pending_field_updates.add('database_node_ip_address')
-
- self._database_node_ip_address = database_node_ip_address
- #end database_node_ip_address
-
- def set_database_node_ip_address(self, value):
- self.database_node_ip_address = value
- #end set_database_node_ip_address
-
- @vnc_api.gen.resource_common.DatabaseNode.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for database-node.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.DatabaseNode.display_name.setter
- def display_name(self, display_name):
- """Set display-name for database-node.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
-
-#end class DatabaseNode
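Editor's note: the client-side classes in this file wrap the generated common classes and record which properties and references were touched locally, so a later update can send only the delta. A minimal sketch of that bookkeeping, assuming the module is importable as vnc_api.gen.resource_client (the path shown in this diff) and using only the accessors defined above:

    from vnc_api.gen.resource_client import DatabaseNode

    node = DatabaseNode('db-node-1', database_node_ip_address='10.0.0.5')
    print node.get_pending_updates()    # set(['fq_name', 'parent_type', 'database_node_ip_address'])
    node.set_display_name('primary database node')
    print node.get_pending_updates()    # now also contains 'display_name'
    node.clear_pending_updates()        # presumably called by the library once the server accepts the write
    print node.get_pending_updates()    # set([])

Only properties passed to the constructor or set afterwards are marked pending; untouched fields are never re-sent.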
-
-class RoutingInstance(vnc_api.gen.resource_common.RoutingInstance):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, service_chain_information = None, routing_instance_is_default = None, static_route_entries = None, default_ce_protocol = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if service_chain_information:
- pending_fields.append('service_chain_information')
- if routing_instance_is_default:
- pending_fields.append('routing_instance_is_default')
- if static_route_entries:
- pending_fields.append('static_route_entries')
- if default_ce_protocol:
- pending_fields.append('default_ce_protocol')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(RoutingInstance, self).__init__(name, parent_obj, service_chain_information, routing_instance_is_default, static_route_entries, default_ce_protocol, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'service_chain_information' in kwargs:
- props_dict['service_chain_information'] = vnc_api.gen.resource_xsd.ServiceChainInfo(**kwargs['service_chain_information'])
- if 'routing_instance_is_default' in kwargs:
- props_dict['routing_instance_is_default'] = kwargs['routing_instance_is_default']
- if 'static_route_entries' in kwargs:
- props_dict['static_route_entries'] = vnc_api.gen.resource_xsd.StaticRouteEntriesType(**kwargs['static_route_entries'])
- if 'default_ce_protocol' in kwargs:
- props_dict['default_ce_protocol'] = vnc_api.gen.resource_xsd.DefaultProtocolType(**kwargs['default_ce_protocol'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = RoutingInstance(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
- if 'bgp_routers' in kwargs:
- obj.bgp_routers = kwargs['bgp_routers']
-
- # add any specified references...
- if 'routing_instance_refs' in kwargs:
- obj.routing_instance_refs = kwargs['routing_instance_refs']
- for ref in obj.routing_instance_refs:
- ref['attr'] = vnc_api.gen.resource_xsd.ConnectionType(**ref['attr'])
- if 'route_target_refs' in kwargs:
- obj.route_target_refs = kwargs['route_target_refs']
- for ref in obj.route_target_refs:
- ref['attr'] = vnc_api.gen.resource_xsd.InstanceTargetType(**ref['attr'])
-
- # and back references but no obj api for it...
- if 'virtual_machine_interface_back_refs' in kwargs:
- obj.virtual_machine_interface_back_refs = kwargs['virtual_machine_interface_back_refs']
- if 'routing_instance_back_refs' in kwargs:
- obj.routing_instance_back_refs = kwargs['routing_instance_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.RoutingInstance.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.RoutingInstance.service_chain_information.setter
- def service_chain_information(self, service_chain_information):
- """Set service-chain-information for routing-instance.
-
- :param service_chain_information: ServiceChainInfo object
-
- """
- if 'service_chain_information' not in self._pending_field_updates:
- self._pending_field_updates.add('service_chain_information')
-
- self._service_chain_information = service_chain_information
- #end service_chain_information
-
- def set_service_chain_information(self, value):
- self.service_chain_information = value
- #end set_service_chain_information
-
- @vnc_api.gen.resource_common.RoutingInstance.routing_instance_is_default.setter
- def routing_instance_is_default(self, routing_instance_is_default):
- """Set routing-instance-is-default for routing-instance.
-
- :param routing_instance_is_default: xsd:boolean object
-
- """
- if 'routing_instance_is_default' not in self._pending_field_updates:
- self._pending_field_updates.add('routing_instance_is_default')
-
- self._routing_instance_is_default = routing_instance_is_default
- #end routing_instance_is_default
-
- def set_routing_instance_is_default(self, value):
- self.routing_instance_is_default = value
- #end set_routing_instance_is_default
-
- @vnc_api.gen.resource_common.RoutingInstance.static_route_entries.setter
- def static_route_entries(self, static_route_entries):
- """Set static-route-entries for routing-instance.
-
- :param static_route_entries: StaticRouteEntriesType object
-
- """
- if 'static_route_entries' not in self._pending_field_updates:
- self._pending_field_updates.add('static_route_entries')
-
- self._static_route_entries = static_route_entries
- #end static_route_entries
-
- def set_static_route_entries(self, value):
- self.static_route_entries = value
- #end set_static_route_entries
-
- @vnc_api.gen.resource_common.RoutingInstance.default_ce_protocol.setter
- def default_ce_protocol(self, default_ce_protocol):
- """Set default-ce-protocol for routing-instance.
-
- :param default_ce_protocol: DefaultProtocolType object
-
- """
- if 'default_ce_protocol' not in self._pending_field_updates:
- self._pending_field_updates.add('default_ce_protocol')
-
- self._default_ce_protocol = default_ce_protocol
- #end default_ce_protocol
-
- def set_default_ce_protocol(self, value):
- self.default_ce_protocol = value
- #end set_default_ce_protocol
-
- @vnc_api.gen.resource_common.RoutingInstance.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for routing-instance.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.RoutingInstance.display_name.setter
- def display_name(self, display_name):
- """Set display-name for routing-instance.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_routing_instance(self, *args, **kwargs):
- """Set routing-instance for routing-instance.
-
- :param ref_obj: RoutingInstance object
- :param ref_data: ConnectionType object
-
- """
- self._pending_field_updates.add('routing_instance_refs')
- self._pending_ref_updates.discard('routing_instance_refs')
- super(RoutingInstance, self).set_routing_instance(*args, **kwargs)
-
- #end set_routing_instance
-
- def add_routing_instance(self, *args, **kwargs):
- """Add routing-instance to routing-instance.
-
- :param ref_obj: RoutingInstance object
- :param ref_data: ConnectionType object
-
- """
- if 'routing_instance_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('routing_instance_refs')
- self._original_routing_instance_refs = (self.get_routing_instance_refs() or [])[:]
- super(RoutingInstance, self).add_routing_instance(*args, **kwargs)
- #end add_routing_instance
-
- def del_routing_instance(self, *args, **kwargs):
- if 'routing_instance_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('routing_instance_refs')
- self._original_routing_instance_refs = (self.get_routing_instance_refs() or [])[:]
- super(RoutingInstance, self).del_routing_instance(*args, **kwargs)
- #end del_routing_instance
-
- def set_routing_instance_list(self, *args, **kwargs):
- """Set routing-instance list for routing-instance.
-
- :param ref_obj_list: list of RoutingInstance object
- :param ref_data_list: list of ConnectionType summary
-
- """
- self._pending_field_updates.add('routing_instance_refs')
- self._pending_ref_updates.discard('routing_instance_refs')
- super(RoutingInstance, self).set_routing_instance_list(*args, **kwargs)
- #end set_routing_instance_list
-
- def set_route_target(self, *args, **kwargs):
- """Set route-target for routing-instance.
-
- :param ref_obj: RouteTarget object
- :param ref_data: InstanceTargetType object
-
- """
- self._pending_field_updates.add('route_target_refs')
- self._pending_ref_updates.discard('route_target_refs')
- super(RoutingInstance, self).set_route_target(*args, **kwargs)
-
- #end set_route_target
-
- def add_route_target(self, *args, **kwargs):
- """Add route-target to routing-instance.
-
- :param ref_obj: RouteTarget object
- :param ref_data: InstanceTargetType object
-
- """
- if 'route_target_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('route_target_refs')
- self._original_route_target_refs = (self.get_route_target_refs() or [])[:]
- super(RoutingInstance, self).add_route_target(*args, **kwargs)
- #end add_route_target
-
- def del_route_target(self, *args, **kwargs):
- if 'route_target_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('route_target_refs')
- self._original_route_target_refs = (self.get_route_target_refs() or [])[:]
- super(RoutingInstance, self).del_route_target(*args, **kwargs)
- #end del_route_target
-
- def set_route_target_list(self, *args, **kwargs):
- """Set route-target list for routing-instance.
-
- :param ref_obj_list: list of RouteTarget object
- :param ref_data_list: list of InstanceTargetType summary
-
- """
- self._pending_field_updates.add('route_target_refs')
- self._pending_ref_updates.discard('route_target_refs')
- super(RoutingInstance, self).set_route_target_list(*args, **kwargs)
- #end set_route_target_list
-
- def get_bgp_routers(self):
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- children = super(RoutingInstance, self).get_bgp_routers()
- if not children: # read it for first time
- obj = svr_conn.routing_instance_read(id = self.uuid, fields = ['bgp_routers'])
- children = getattr(obj, 'bgp_routers', None)
- self.bgp_routers = children
-
- return children
- #end get_bgp_routers
-
-
- def get_virtual_machine_interface_back_refs(self):
- """Return list of all virtual-machine-interfaces using this routing-instance"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.routing_instance_read(id = self.uuid, fields = ['virtual_machine_interface_back_refs'])
- back_refs = getattr(obj, 'virtual_machine_interface_back_refs', None)
- self.virtual_machine_interface_back_refs = back_refs
-
- return back_refs
- #end get_virtual_machine_interface_back_refs
-
- def get_routing_instance_back_refs(self):
- """Return list of all routing-instances using this routing-instance"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.routing_instance_read(id = self.uuid, fields = ['routing_instance_back_refs'])
- back_refs = getattr(obj, 'routing_instance_back_refs', None)
- self.routing_instance_back_refs = back_refs
-
- return back_refs
- #end get_routing_instance_back_refs
-
-#end class RoutingInstance
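Editor's note: for references that carry attribute data, the methods above distinguish a wholesale replace (set_* / set_*_list, tracked as a field update) from an incremental edit (add_* / del_*, tracked as a ref update plus a snapshot of the original list). A rough sketch, assuming RouteTarget is generated elsewhere in this file and that InstanceTargetType can be constructed without arguments:

    import vnc_api.gen.resource_xsd as xsd
    from vnc_api.gen.resource_client import RouteTarget, RoutingInstance

    ri = RoutingInstance('ri-1')
    rt = RouteTarget('target:64512:10000')              # hypothetical route target
    ri.add_route_target(rt, xsd.InstanceTargetType())   # incremental edit
    print ri.get_ref_updates()                          # set(['route_target_refs'])

    ri.set_route_target_list([rt], [xsd.InstanceTargetType()])   # wholesale replace
    print ri.get_ref_updates()                          # set([]) - superseded by a field update
    print 'route_target_refs' in ri.get_pending_updates()        # True

    print ri.get_bgp_routers()                          # None - no server handle attached via set_server_conn()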
-
-class NetworkIpam(vnc_api.gen.resource_common.NetworkIpam):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, network_ipam_mgmt = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if network_ipam_mgmt:
- pending_fields.append('network_ipam_mgmt')
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(NetworkIpam, self).__init__(name, parent_obj, network_ipam_mgmt, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'network_ipam_mgmt' in kwargs:
- props_dict['network_ipam_mgmt'] = vnc_api.gen.resource_xsd.IpamType(**kwargs['network_ipam_mgmt'])
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = NetworkIpam(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
- if 'virtual_DNS_refs' in kwargs:
- obj.virtual_DNS_refs = kwargs['virtual_DNS_refs']
-
- # and back references but no obj api for it...
- if 'virtual_network_back_refs' in kwargs:
- obj.virtual_network_back_refs = kwargs['virtual_network_back_refs']
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.NetworkIpam.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.NetworkIpam.network_ipam_mgmt.setter
- def network_ipam_mgmt(self, network_ipam_mgmt):
- """Set network-ipam-mgmt for network-ipam.
-
- :param network_ipam_mgmt: IpamType object
-
- """
- if 'network_ipam_mgmt' not in self._pending_field_updates:
- self._pending_field_updates.add('network_ipam_mgmt')
-
- self._network_ipam_mgmt = network_ipam_mgmt
- #end network_ipam_mgmt
-
- def set_network_ipam_mgmt(self, value):
- self.network_ipam_mgmt = value
- #end set_network_ipam_mgmt
-
- @vnc_api.gen.resource_common.NetworkIpam.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for network-ipam.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.NetworkIpam.display_name.setter
- def display_name(self, display_name):
- """Set display-name for network-ipam.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_virtual_DNS(self, *args, **kwargs):
- """Set virtual-DNS for network-ipam.
-
- :param ref_obj: VirtualDns object
-
- """
- self._pending_field_updates.add('virtual_DNS_refs')
- self._pending_ref_updates.discard('virtual_DNS_refs')
- super(NetworkIpam, self).set_virtual_DNS(*args, **kwargs)
-
- #end set_virtual_DNS
-
- def add_virtual_DNS(self, *args, **kwargs):
- """Add virtual-DNS to network-ipam.
-
- :param ref_obj: VirtualDns object
-
- """
- if 'virtual_DNS_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_DNS_refs')
- self._original_virtual_DNS_refs = (self.get_virtual_DNS_refs() or [])[:]
- super(NetworkIpam, self).add_virtual_DNS(*args, **kwargs)
- #end add_virtual_DNS
-
- def del_virtual_DNS(self, *args, **kwargs):
- if 'virtual_DNS_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_DNS_refs')
- self._original_virtual_DNS_refs = (self.get_virtual_DNS_refs() or [])[:]
- super(NetworkIpam, self).del_virtual_DNS(*args, **kwargs)
- #end del_virtual_DNS
-
- def set_virtual_DNS_list(self, *args, **kwargs):
- """Set virtual-DNS list for network-ipam.
-
- :param ref_obj_list: list of VirtualDns object
-
- """
- self._pending_field_updates.add('virtual_DNS_refs')
- self._pending_ref_updates.discard('virtual_DNS_refs')
- super(NetworkIpam, self).set_virtual_DNS_list(*args, **kwargs)
- #end set_virtual_DNS_list
-
-
- def get_virtual_network_back_refs(self):
- """Return list of all virtual-networks using this network-ipam"""
- # if object not created/read from lib can't service
- svr_conn = self._server_conn
- if not svr_conn:
- return None
-
- obj = svr_conn.network_ipam_read(id = self.uuid, fields = ['virtual_network_back_refs'])
- back_refs = getattr(obj, 'virtual_network_back_refs', None)
- self.virtual_network_back_refs = back_refs
-
- return back_refs
- #end get_virtual_network_back_refs
-
-#end class NetworkIpam
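Editor's note: from_dict above rebuilds a client object from the flat dict returned by the API server, re-inflating typed properties (network_ipam_mgmt becomes an IpamType) and routing the uuid through the overriding setter so it is marked pending. A sketch with made-up identifiers, assuming IpamType accepts keyword-only construction:

    from vnc_api.gen.resource_client import NetworkIpam

    ipam = NetworkIpam.from_dict(
        fq_name=['default-domain', 'default-project', 'ipam-blue'],
        uuid='0000-aaaa-1111-bbbb',            # made-up identifier
        parent_type='project',
        network_ipam_mgmt={},                  # re-inflated into an IpamType
        display_name='ipam-blue')
    print type(ipam.network_ipam_mgmt)         # <class 'vnc_api.gen.resource_xsd.IpamType'>
    print 'uuid' in ipam.get_pending_updates() # True - added by the uuid setter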
-
-class LogicalRouter(vnc_api.gen.resource_common.LogicalRouter):
- create_uri = ''
- resource_uri_base = {}
- def __init__(self, name = None, parent_obj = None, id_perms = None, display_name = None, *args, **kwargs):
- pending_fields = ['fq_name', 'parent_type']
-
- self._server_conn = None
-
- if id_perms:
- pending_fields.append('id_perms')
- if display_name:
- pending_fields.append('display_name')
-
- self._pending_field_updates = set(pending_fields)
- self._pending_ref_updates = set([])
-
- super(LogicalRouter, self).__init__(name, parent_obj, id_perms, display_name, *args, **kwargs)
- #end __init__
-
- def get_pending_updates(self):
- return self._pending_field_updates
- #end get_pending_updates
-
- def get_ref_updates(self):
- return self._pending_ref_updates
- #end get_ref_updates
-
- def clear_pending_updates(self):
- self._pending_field_updates = set([])
- self._pending_ref_updates = set([])
- #end clear_pending_updates
-
- def set_server_conn(self, vnc_api_handle):
- self._server_conn = vnc_api_handle
- #end set_server_conn
-
- @classmethod
- def from_dict(cls, **kwargs):
- props_dict = {}
- if 'id_perms' in kwargs:
- props_dict['id_perms'] = vnc_api.gen.resource_xsd.IdPermsType(**kwargs['id_perms'])
- if 'display_name' in kwargs:
- props_dict['display_name'] = kwargs['display_name']
-
- # obj constructor takes only props
- parent_type = kwargs.get('parent_type', None)
- fq_name = kwargs['fq_name']
- props_dict.update({'parent_type': parent_type, 'fq_name': fq_name})
- obj = LogicalRouter(fq_name[-1], **props_dict)
- obj.uuid = kwargs['uuid']
- if 'parent_uuid' in kwargs:
- obj.parent_uuid = kwargs['parent_uuid']
-
- # add summary of any children...
-
- # add any specified references...
- if 'virtual_machine_interface_refs' in kwargs:
- obj.virtual_machine_interface_refs = kwargs['virtual_machine_interface_refs']
- if 'route_target_refs' in kwargs:
- obj.route_target_refs = kwargs['route_target_refs']
- if 'virtual_network_refs' in kwargs:
- obj.virtual_network_refs = kwargs['virtual_network_refs']
- if 'service_instance_refs' in kwargs:
- obj.service_instance_refs = kwargs['service_instance_refs']
-
- # and back references but no obj api for it...
-
- return obj
- #end from_dict
-
- @vnc_api.gen.resource_common.LogicalRouter.uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- if 'uuid' not in self._pending_field_updates:
- self._pending_field_updates.add('uuid')
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- @vnc_api.gen.resource_common.LogicalRouter.id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for logical-router.
-
- :param id_perms: IdPermsType object
-
- """
- if 'id_perms' not in self._pending_field_updates:
- self._pending_field_updates.add('id_perms')
-
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- @vnc_api.gen.resource_common.LogicalRouter.display_name.setter
- def display_name(self, display_name):
- """Set display-name for logical-router.
-
- :param display_name: xsd:string object
-
- """
- if 'display_name' not in self._pending_field_updates:
- self._pending_field_updates.add('display_name')
-
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def set_virtual_machine_interface(self, *args, **kwargs):
- """Set virtual-machine-interface for logical-router.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(LogicalRouter, self).set_virtual_machine_interface(*args, **kwargs)
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, *args, **kwargs):
- """Add virtual-machine-interface to logical-router.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(LogicalRouter, self).add_virtual_machine_interface(*args, **kwargs)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, *args, **kwargs):
- if 'virtual_machine_interface_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_machine_interface_refs')
- self._original_virtual_machine_interface_refs = (self.get_virtual_machine_interface_refs() or [])[:]
- super(LogicalRouter, self).del_virtual_machine_interface(*args, **kwargs)
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, *args, **kwargs):
- """Set virtual-machine-interface list for logical-router.
-
- :param ref_obj_list: list of VirtualMachineInterface object
-
- """
- self._pending_field_updates.add('virtual_machine_interface_refs')
- self._pending_ref_updates.discard('virtual_machine_interface_refs')
- super(LogicalRouter, self).set_virtual_machine_interface_list(*args, **kwargs)
- #end set_virtual_machine_interface_list
-
- def set_route_target(self, *args, **kwargs):
- """Set route-target for logical-router.
-
- :param ref_obj: RouteTarget object
-
- """
- self._pending_field_updates.add('route_target_refs')
- self._pending_ref_updates.discard('route_target_refs')
- super(LogicalRouter, self).set_route_target(*args, **kwargs)
-
- #end set_route_target
-
- def add_route_target(self, *args, **kwargs):
- """Add route-target to logical-router.
-
- :param ref_obj: RouteTarget object
-
- """
- if 'route_target_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('route_target_refs')
- self._original_route_target_refs = (self.get_route_target_refs() or [])[:]
- super(LogicalRouter, self).add_route_target(*args, **kwargs)
- #end add_route_target
-
- def del_route_target(self, *args, **kwargs):
- if 'route_target_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('route_target_refs')
- self._original_route_target_refs = (self.get_route_target_refs() or [])[:]
- super(LogicalRouter, self).del_route_target(*args, **kwargs)
- #end del_route_target
-
- def set_route_target_list(self, *args, **kwargs):
- """Set route-target list for logical-router.
-
- :param ref_obj_list: list of RouteTarget object
-
- """
- self._pending_field_updates.add('route_target_refs')
- self._pending_ref_updates.discard('route_target_refs')
- super(LogicalRouter, self).set_route_target_list(*args, **kwargs)
- #end set_route_target_list
-
- def set_virtual_network(self, *args, **kwargs):
- """Set virtual-network for logical-router.
-
- :param ref_obj: VirtualNetwork object
-
- """
- self._pending_field_updates.add('virtual_network_refs')
- self._pending_ref_updates.discard('virtual_network_refs')
- super(LogicalRouter, self).set_virtual_network(*args, **kwargs)
-
- #end set_virtual_network
-
- def add_virtual_network(self, *args, **kwargs):
- """Add virtual-network to logical-router.
-
- :param ref_obj: VirtualNetwork object
-
- """
- if 'virtual_network_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('virtual_network_refs')
- self._original_virtual_network_refs = (self.get_virtual_network_refs() or [])[:]
- super(LogicalRouter, self).add_virtual_network(*args, **kwargs)
- #end add_virtual_network
-
- def del_virtual_network(self, *args, **kwargs):
- if 'virtual_network_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('virtual_network_refs')
- self._original_virtual_network_refs = (self.get_virtual_network_refs() or [])[:]
- super(LogicalRouter, self).del_virtual_network(*args, **kwargs)
- #end del_virtual_network
-
- def set_virtual_network_list(self, *args, **kwargs):
- """Set virtual-network list for logical-router.
-
- :param ref_obj_list: list of VirtualNetwork object
-
- """
- self._pending_field_updates.add('virtual_network_refs')
- self._pending_ref_updates.discard('virtual_network_refs')
- super(LogicalRouter, self).set_virtual_network_list(*args, **kwargs)
- #end set_virtual_network_list
-
- def set_service_instance(self, *args, **kwargs):
- """Set service-instance for logical-router.
-
- :param ref_obj: ServiceInstance object
-
- """
- self._pending_field_updates.add('service_instance_refs')
- self._pending_ref_updates.discard('service_instance_refs')
- super(LogicalRouter, self).set_service_instance(*args, **kwargs)
-
- #end set_service_instance
-
- def add_service_instance(self, *args, **kwargs):
- """Add service-instance to logical-router.
-
- :param ref_obj: ServiceInstance object
-
- """
- if 'service_instance_refs' not in self._pending_ref_updates|self._pending_field_updates:
- self._pending_ref_updates.add('service_instance_refs')
- self._original_service_instance_refs = (self.get_service_instance_refs() or [])[:]
- super(LogicalRouter, self).add_service_instance(*args, **kwargs)
- #end add_service_instance
-
- def del_service_instance(self, *args, **kwargs):
- if 'service_instance_refs' not in self._pending_ref_updates:
- self._pending_ref_updates.add('service_instance_refs')
- self._original_service_instance_refs = (self.get_service_instance_refs() or [])[:]
- super(LogicalRouter, self).del_service_instance(*args, **kwargs)
- #end del_service_instance
-
- def set_service_instance_list(self, *args, **kwargs):
- """Set service-instance list for logical-router.
-
- :param ref_obj_list: list of ServiceInstance object
-
- """
- self._pending_field_updates.add('service_instance_refs')
- self._pending_ref_updates.discard('service_instance_refs')
- super(LogicalRouter, self).set_service_instance_list(*args, **kwargs)
- #end set_service_instance_list
-
-
-#end class LogicalRouter
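Editor's note: because logical-router references carry no attribute data, from_dict above passes the ref lists through untouched (compare RoutingInstance.from_dict, which wraps each ref's 'attr' in a typed object). A sketch with made-up values:

    from vnc_api.gen.resource_client import LogicalRouter

    lr = LogicalRouter.from_dict(
        fq_name=['default-domain', 'default-project', 'lr-1'],
        uuid='2222-cccc',                      # made-up identifier
        parent_type='project',
        virtual_network_refs=[{'to': ['default-domain', 'default-project', 'vn-blue']}])
    print lr.get_virtual_network_refs()        # the ref dicts, exactly as supplied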
-
diff --git a/Testcases/vnc_api/gen/resource_client.pyc b/Testcases/vnc_api/gen/resource_client.pyc
deleted file mode 100644
index e654a59..0000000
--- a/Testcases/vnc_api/gen/resource_client.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/gen/resource_common.py b/Testcases/vnc_api/gen/resource_common.py
deleted file mode 100644
index 72f7824..0000000
--- a/Testcases/vnc_api/gen/resource_common.py
+++ /dev/null
@@ -1,15559 +0,0 @@
-
-# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit!
-
-"""
-This module defines the classes for every configuration element managed by the system
-"""
-
-class Domain(object):
- """
- Represents the domain configuration element.
-
- Child of:
- :class:`.ConfigRoot` object OR
-
- Properties:
- * domain-limits (:class:`.DomainLimitsType` type)
- * api-access-list (:class:`.ApiAccessListType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
- * list of :class:`.Project` objects
- * list of :class:`.Namespace` objects
- * list of :class:`.ServiceTemplate` objects
- * list of :class:`.VirtualDns` objects
-
- References to:
-
- Referred by:
- """
-
- prop_fields = set([u'domain_limits', u'api_access_list', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'config_root_back_refs'])
- children_fields = set([u'projects', u'namespaces', 'service_templates', u'virtual_DNSs'])
-
- def __init__(self, name = None, parent_obj = None, domain_limits = None, api_access_list = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'domain'
- if not name:
- name = u'default-domain'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.fq_name = [name]
-
- # property fields
- if domain_limits:
- self._domain_limits = domain_limits
- if api_access_list:
- self._api_access_list = api_access_list
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (domain)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of domain in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of domain as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of domain's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of domain's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def domain_limits(self):
- """Get domain-limits for domain.
-
- :returns: DomainLimitsType object
-
- """
- return getattr(self, '_domain_limits', None)
- #end domain_limits
-
- @domain_limits.setter
- def domain_limits(self, domain_limits):
- """Set domain-limits for domain.
-
- :param domain_limits: DomainLimitsType object
-
- """
- self._domain_limits = domain_limits
- #end domain_limits
-
- def set_domain_limits(self, value):
- self.domain_limits = value
- #end set_domain_limits
-
- def get_domain_limits(self):
- return self.domain_limits
- #end get_domain_limits
-
- @property
- def api_access_list(self):
- """Get api-access-list for domain.
-
- :returns: ApiAccessListType object
-
- """
- return getattr(self, '_api_access_list', None)
- #end api_access_list
-
- @api_access_list.setter
- def api_access_list(self, api_access_list):
- """Set api-access-list for domain.
-
- :param api_access_list: ApiAccessListType object
-
- """
- self._api_access_list = api_access_list
- #end api_access_list
-
- def set_api_access_list(self, value):
- self.api_access_list = value
- #end set_api_access_list
-
- def get_api_access_list(self):
- return self.api_access_list
- #end get_api_access_list
-
- @property
- def id_perms(self):
- """Get id-perms for domain.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for domain.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for domain.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for domain.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_domain_limits'):
- self._serialize_field_to_json(serialized, field_names, 'domain_limits')
- if hasattr(self, '_api_access_list'):
- self._serialize_field_to_json(serialized, field_names, 'api_access_list')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_projects(self):
- return getattr(self, 'projects', None)
- #end get_projects
-
- def get_namespaces(self):
- return getattr(self, 'namespaces', None)
- #end get_namespaces
-
- def get_service_templates(self):
- return getattr(self, 'service_templates', None)
- #end get_service_templates
-
- def get_virtual_DNSs(self):
- return getattr(self, 'virtual_DNSs', None)
- #end get_virtual_DNSs
-
- def get_config_root_back_refs(self):
- """Return list of all config-roots using this domain"""
- return getattr(self, 'config_root_back_refs', None)
- #end get_config_root_back_refs
-
- def dump(self):
- """Display domain object in compact form."""
- print '------------ domain ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P domain_limits = ', self.get_domain_limits()
- print 'P api_access_list = ', self.get_api_access_list()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'HAS project = ', self.get_projects()
- print 'HAS namespace = ', self.get_namespaces()
- print 'HAS service_template = ', self.get_service_templates()
- print 'HAS virtual_DNS = ', self.get_virtual_DNSs()
- #end dump
-
-#end class Domain
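Editor's note: the common classes in this file are plain, server-independent data holders; the constructor derives fq_name from the parent (or treats the object as a config-root child when there is none), and serialize_to_json emits only the requested fields plus the uuid. A minimal sketch using just the Domain class above:

    from vnc_api.gen.resource_common import Domain

    domain = Domain('acme')
    print domain.get_fq_name_str()               # 'acme'
    print domain.get_parent_fq_name()            # None - no parent, so it hangs off config-root
    print domain.serialize_to_json(['fq_name'])  # {'uuid': None, 'fq_name': ['acme']}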
-
-class GlobalVrouterConfig(object):
- """
- Represents the global-vrouter-config configuration element.
-
- Child of:
- :class:`.GlobalSystemConfig` object OR
-
- Properties:
- * linklocal-services (:class:`.LinklocalServicesTypes` type)
- * encapsulation-priorities (:class:`.EncapsulationPrioritiesType` type)
- * vxlan-network-identifier-mode (VxlanNetworkIdentifierModeType type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
-
- Referred by:
- """
-
- prop_fields = set([u'linklocal_services', u'encapsulation_priorities', u'vxlan_network_identifier_mode', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'global_system_config_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, linklocal_services = None, encapsulation_priorities = None, vxlan_network_identifier_mode = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'global-vrouter-config'
- if not name:
- name = u'default-global-vrouter-config'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'global-system-config'
- self.fq_name = [u'default-global-system-config']
- self.fq_name.append(name)
-
-
- # property fields
- if linklocal_services:
- self._linklocal_services = linklocal_services
- if encapsulation_priorities:
- self._encapsulation_priorities = encapsulation_priorities
- if vxlan_network_identifier_mode:
- self._vxlan_network_identifier_mode = vxlan_network_identifier_mode
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (global-vrouter-config)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of global-vrouter-config in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of global-vrouter-config as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of global-vrouter-config's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of global-vrouter-config's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def linklocal_services(self):
- """Get linklocal-services for global-vrouter-config.
-
- :returns: LinklocalServicesTypes object
-
- """
- return getattr(self, '_linklocal_services', None)
- #end linklocal_services
-
- @linklocal_services.setter
- def linklocal_services(self, linklocal_services):
- """Set linklocal-services for global-vrouter-config.
-
- :param linklocal_services: LinklocalServicesTypes object
-
- """
- self._linklocal_services = linklocal_services
- #end linklocal_services
-
- def set_linklocal_services(self, value):
- self.linklocal_services = value
- #end set_linklocal_services
-
- def get_linklocal_services(self):
- return self.linklocal_services
- #end get_linklocal_services
-
- @property
- def encapsulation_priorities(self):
- """Get encapsulation-priorities for global-vrouter-config.
-
- :returns: EncapsulationPrioritiesType object
-
- """
- return getattr(self, '_encapsulation_priorities', None)
- #end encapsulation_priorities
-
- @encapsulation_priorities.setter
- def encapsulation_priorities(self, encapsulation_priorities):
- """Set encapsulation-priorities for global-vrouter-config.
-
- :param encapsulation_priorities: EncapsulationPrioritiesType object
-
- """
- self._encapsulation_priorities = encapsulation_priorities
- #end encapsulation_priorities
-
- def set_encapsulation_priorities(self, value):
- self.encapsulation_priorities = value
- #end set_encapsulation_priorities
-
- def get_encapsulation_priorities(self):
- return self.encapsulation_priorities
- #end get_encapsulation_priorities
-
- @property
- def vxlan_network_identifier_mode(self):
- """Get vxlan-network-identifier-mode for global-vrouter-config.
-
- :returns: VxlanNetworkIdentifierModeType object
-
- """
- return getattr(self, '_vxlan_network_identifier_mode', None)
- #end vxlan_network_identifier_mode
-
- @vxlan_network_identifier_mode.setter
- def vxlan_network_identifier_mode(self, vxlan_network_identifier_mode):
- """Set vxlan-network-identifier-mode for global-vrouter-config.
-
- :param vxlan_network_identifier_mode: VxlanNetworkIdentifierModeType object
-
- """
- self._vxlan_network_identifier_mode = vxlan_network_identifier_mode
- #end vxlan_network_identifier_mode
-
- def set_vxlan_network_identifier_mode(self, value):
- self.vxlan_network_identifier_mode = value
- #end set_vxlan_network_identifier_mode
-
- def get_vxlan_network_identifier_mode(self):
- return self.vxlan_network_identifier_mode
- #end get_vxlan_network_identifier_mode
-
- @property
- def id_perms(self):
- """Get id-perms for global-vrouter-config.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for global-vrouter-config.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for global-vrouter-config.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for global-vrouter-config.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_linklocal_services'):
- self._serialize_field_to_json(serialized, field_names, 'linklocal_services')
- if hasattr(self, '_encapsulation_priorities'):
- self._serialize_field_to_json(serialized, field_names, 'encapsulation_priorities')
- if hasattr(self, '_vxlan_network_identifier_mode'):
- self._serialize_field_to_json(serialized, field_names, 'vxlan_network_identifier_mode')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_global_system_config_back_refs(self):
- """Return list of all global-system-configs using this global-vrouter-config"""
- return getattr(self, 'global_system_config_back_refs', None)
- #end get_global_system_config_back_refs
-
- def dump(self):
- """Display global-vrouter-config object in compact form."""
- print '------------ global-vrouter-config ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P linklocal_services = ', self.get_linklocal_services()
- print 'P encapsulation_priorities = ', self.get_encapsulation_priorities()
- print 'P vxlan_network_identifier_mode = ', self.get_vxlan_network_identifier_mode()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- #end dump
-
-#end class GlobalVrouterConfig
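Editor's note: unlike Domain, a GlobalVrouterConfig built without a parent pins itself under the default global-system-config, as the constructor above shows:

    from vnc_api.gen.resource_common import GlobalVrouterConfig

    gvc = GlobalVrouterConfig()
    print gvc.get_fq_name()              # [u'default-global-system-config', u'default-global-vrouter-config']
    print gvc.parent_name                # default-global-system-config
    print gvc.get_parent_fq_name_str()   # default-global-system-config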
-
-class InstanceIp(object):
- """
- Represents the instance-ip configuration element.
-
- Properties:
- * instance-ip-address (IpAddressType type)
- * instance-ip-family (IpAddressFamilyType type)
- * instance-ip-mode (AddressMode type)
- * subnet-uuid (xsd:string type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
- * list of :class:`.VirtualNetwork` objects
- * list of :class:`.VirtualMachineInterface` objects
-
- Referred by:
- """
-
- prop_fields = set([u'instance_ip_address', u'instance_ip_family', u'instance_ip_mode', u'subnet_uuid', u'id_perms', u'display_name'])
- ref_fields = set([u'virtual_network_refs', 'virtual_machine_interface_refs'])
- backref_fields = set([])
- children_fields = set([])
-
- def __init__(self, name = None, instance_ip_address = None, instance_ip_family = None, instance_ip_mode = None, subnet_uuid = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'instance-ip'
- if not name:
- name = u'default-instance-ip'
- self.name = name
- self._uuid = None
- self.fq_name = [name]
-
- # property fields
- if instance_ip_address:
- self._instance_ip_address = instance_ip_address
- if instance_ip_family:
- self._instance_ip_family = instance_ip_family
- if instance_ip_mode:
- self._instance_ip_mode = instance_ip_mode
- if subnet_uuid:
- self._subnet_uuid = subnet_uuid
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (instance-ip)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of instance-ip in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of instance-ip as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def instance_ip_address(self):
- """Get instance-ip-address for instance-ip.
-
- :returns: IpAddressType object
-
- """
- return getattr(self, '_instance_ip_address', None)
- #end instance_ip_address
-
- @instance_ip_address.setter
- def instance_ip_address(self, instance_ip_address):
- """Set instance-ip-address for instance-ip.
-
- :param instance_ip_address: IpAddressType object
-
- """
- self._instance_ip_address = instance_ip_address
- #end instance_ip_address
-
- def set_instance_ip_address(self, value):
- self.instance_ip_address = value
- #end set_instance_ip_address
-
- def get_instance_ip_address(self):
- return self.instance_ip_address
- #end get_instance_ip_address
-
- @property
- def instance_ip_family(self):
- """Get instance-ip-family for instance-ip.
-
- :returns: IpAddressFamilyType object
-
- """
- return getattr(self, '_instance_ip_family', None)
- #end instance_ip_family
-
- @instance_ip_family.setter
- def instance_ip_family(self, instance_ip_family):
- """Set instance-ip-family for instance-ip.
-
- :param instance_ip_family: IpAddressFamilyType object
-
- """
- self._instance_ip_family = instance_ip_family
- #end instance_ip_family
-
- def set_instance_ip_family(self, value):
- self.instance_ip_family = value
- #end set_instance_ip_family
-
- def get_instance_ip_family(self):
- return self.instance_ip_family
- #end get_instance_ip_family
-
- @property
- def instance_ip_mode(self):
- """Get instance-ip-mode for instance-ip.
-
- :returns: AddressMode object
-
- """
- return getattr(self, '_instance_ip_mode', None)
- #end instance_ip_mode
-
- @instance_ip_mode.setter
- def instance_ip_mode(self, instance_ip_mode):
- """Set instance-ip-mode for instance-ip.
-
- :param instance_ip_mode: AddressMode object
-
- """
- self._instance_ip_mode = instance_ip_mode
- #end instance_ip_mode
-
- def set_instance_ip_mode(self, value):
- self.instance_ip_mode = value
- #end set_instance_ip_mode
-
- def get_instance_ip_mode(self):
- return self.instance_ip_mode
- #end get_instance_ip_mode
-
- @property
- def subnet_uuid(self):
- """Get subnet-uuid for instance-ip.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_subnet_uuid', None)
- #end subnet_uuid
-
- @subnet_uuid.setter
- def subnet_uuid(self, subnet_uuid):
- """Set subnet-uuid for instance-ip.
-
- :param subnet_uuid: xsd:string object
-
- """
- self._subnet_uuid = subnet_uuid
- #end subnet_uuid
-
- def set_subnet_uuid(self, value):
- self.subnet_uuid = value
- #end set_subnet_uuid
-
- def get_subnet_uuid(self):
- return self.subnet_uuid
- #end get_subnet_uuid
-
- @property
- def id_perms(self):
- """Get id-perms for instance-ip.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for instance-ip.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for instance-ip.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for instance-ip.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_instance_ip_address'):
- self._serialize_field_to_json(serialized, field_names, 'instance_ip_address')
- if hasattr(self, '_instance_ip_family'):
- self._serialize_field_to_json(serialized, field_names, 'instance_ip_family')
- if hasattr(self, '_instance_ip_mode'):
- self._serialize_field_to_json(serialized, field_names, 'instance_ip_mode')
- if hasattr(self, '_subnet_uuid'):
- self._serialize_field_to_json(serialized, field_names, 'subnet_uuid')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'virtual_network_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_network_refs')
- if hasattr(self, 'virtual_machine_interface_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_interface_refs')
- return serialized
- #end serialize_to_json
-
- def set_virtual_network(self, ref_obj):
- """Set virtual-network for instance-ip.
-
- :param ref_obj: VirtualNetwork object
-
- """
- self.virtual_network_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_network_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_network
-
- def add_virtual_network(self, ref_obj):
- """Add virtual-network to instance-ip.
-
- :param ref_obj: VirtualNetwork object
-
- """
- refs = getattr(self, 'virtual_network_refs', [])
- if not refs:
- self.virtual_network_refs = []
-
- # if ref already exists, just refresh its uuid in place (this ref carries no attr data)
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_network_refs.append(ref_info)
- #end add_virtual_network
-
- def del_virtual_network(self, ref_obj):
- refs = self.get_virtual_network_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_network_refs.remove(ref)
- return
- #end del_virtual_network
-
- def set_virtual_network_list(self, ref_obj_list):
- """Set virtual-network list for instance-ip.
-
- :param ref_obj_list: list of VirtualNetwork object
-
- """
- self.virtual_network_refs = ref_obj_list
- #end set_virtual_network_list
-
- def get_virtual_network_refs(self):
- """Return virtual-network list for instance-ip.
-
- :returns: list of <VirtualNetwork>
-
- """
- return getattr(self, 'virtual_network_refs', None)
- #end get_virtual_network_refs
-
- def set_virtual_machine_interface(self, ref_obj):
- """Set virtual-machine-interface for instance-ip.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_machine_interface_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, ref_obj):
- """Add virtual-machine-interface to instance-ip.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- refs = getattr(self, 'virtual_machine_interface_refs', [])
- if not refs:
- self.virtual_machine_interface_refs = []
-
- # if ref already exists, update its attrs in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_machine_interface_refs.append(ref_info)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, ref_obj):
- refs = self.get_virtual_machine_interface_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_machine_interface_refs.remove(ref)
- return
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, ref_obj_list):
- """Set virtual-machine-interface list for instance-ip.
-
- :param ref_obj_list: list of VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = ref_obj_list
- #end set_virtual_machine_interface_list
-
- def get_virtual_machine_interface_refs(self):
- """Return virtual-machine-interface list for instance-ip.
-
- :returns: list of <VirtualMachineInterface>
-
- """
- return getattr(self, 'virtual_machine_interface_refs', None)
- #end get_virtual_machine_interface_refs
-
- def dump(self):
- """Display instance-ip object in compact form."""
- print '------------ instance-ip ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- print 'P instance_ip_address = ', self.get_instance_ip_address()
- print 'P instance_ip_family = ', self.get_instance_ip_family()
- print 'P instance_ip_mode = ', self.get_instance_ip_mode()
- print 'P subnet_uuid = ', self.get_subnet_uuid()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF virtual_network = ', self.get_virtual_network_refs()
- print 'REF virtual_machine_interface = ', self.get_virtual_machine_interface_refs()
- #end dump
-
-#end class InstanceIp
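For reference, a minimal usage sketch of the InstanceIp helpers removed above. It is illustrative only: iip, vn and vmi stand in for already-constructed InstanceIp, VirtualNetwork and VirtualMachineInterface objects and are not defined in this file.

    # Attach references, then inspect the object (Python 2, matching the removed code).
    iip.set_virtual_network(vn)                     # replaces any existing virtual-network refs
    iip.add_virtual_machine_interface(vmi)          # appends a VMI ref only if one is not already present
    data = iip.serialize_to_json(['fq_name', 'virtual_network_refs'])
    print data                                      # 'uuid' is always included; other fields only when requested
    iip.dump()                                      # compact print-out of properties and refs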
-
-class NetworkPolicy(object):
- """
- Represents network-policy configuration.
-
- Child of:
- :class:`.Project` object
-
- Properties:
- * network-policy-entries (:class:`.PolicyEntriesType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
-
- Referred by:
- * list of :class:`.VirtualNetwork` objects
- """
-
- prop_fields = set([u'network_policy_entries', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'project_back_refs', u'virtual_network_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, network_policy_entries = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'network-policy'
- if not name:
- name = u'default-network-policy'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'project'
- self.fq_name = [u'default-domain', u'default-project']
- self.fq_name.append(name)
-
-
- # property fields
- if network_policy_entries:
- self._network_policy_entries = network_policy_entries
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (network-policy)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of network-policy in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of network-policy as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of network-policy's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of network-policy's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def network_policy_entries(self):
- """Get network-policy-entries for network-policy.
-
- :returns: PolicyEntriesType object
-
- """
- return getattr(self, '_network_policy_entries', None)
- #end network_policy_entries
-
- @network_policy_entries.setter
- def network_policy_entries(self, network_policy_entries):
- """Set network-policy-entries for network-policy.
-
- :param network_policy_entries: PolicyEntriesType object
-
- """
- self._network_policy_entries = network_policy_entries
- #end network_policy_entries
-
- def set_network_policy_entries(self, value):
- self.network_policy_entries = value
- #end set_network_policy_entries
-
- def get_network_policy_entries(self):
- return self.network_policy_entries
- #end get_network_policy_entries
-
- @property
- def id_perms(self):
- """Get id-perms for network-policy.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for network-policy.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for network-policy.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for network-policy.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_network_policy_entries'):
- self._serialize_field_to_json(serialized, field_names, 'network_policy_entries')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_project_back_refs(self):
- """Return list of all projects using this network-policy"""
- return getattr(self, 'project_back_refs', None)
- #end get_project_back_refs
-
- def get_virtual_network_back_refs(self):
- """Return list of all virtual-networks using this network-policy"""
- return getattr(self, 'virtual_network_back_refs', None)
- #end get_virtual_network_back_refs
-
- def dump(self):
- """Display network-policy object in compact form."""
- print '------------ network-policy ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P network_policy_entries = ', self.get_network_policy_entries()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'BCK virtual_network = ', self.get_virtual_network_back_refs()
- #end dump
-
-#end class NetworkPolicy
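A brief sketch of how the removed NetworkPolicy class could be used, assuming it is importable from the deleted module; 'allow-web' and the display name are arbitrary example values.

    policy = NetworkPolicy(name='allow-web')        # no parent_obj: defaults under default-domain:default-project
    print policy.get_fq_name_str()                  # default-domain:default-project:allow-web
    print policy.get_parent_fq_name()               # ['default-domain', 'default-project']
    policy.set_display_name('Allow web traffic')
    print policy.serialize_to_json()                # uuid, fq_name, parent_type plus any properties that were set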
-
-class LoadbalancerPool(object):
- """
- Represents loadbalancer-pool configuration.
-
- Child of:
- :class:`.Project` object
-
- Properties:
- * loadbalancer-pool-properties (:class:`.LoadbalancerPoolType` type)
- * loadbalancer-pool-provider (xsd:string type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
- * list of :class:`.LoadbalancerMember` objects
-
- References to:
- * list of :class:`.ServiceInstance` objects
- * list of :class:`.VirtualMachineInterface` objects
- * list of :class:`.ServiceApplianceSet` objects
- * list of :class:`.LoadbalancerHealthmonitor` objects
-
- Referred by:
- * list of :class:`.VirtualIp` objects
- """
-
- prop_fields = set([u'loadbalancer_pool_properties', u'loadbalancer_pool_provider', u'id_perms', u'display_name'])
- ref_fields = set([u'service_instance_refs', 'virtual_machine_interface_refs', u'service_appliance_set_refs', u'loadbalancer_healthmonitor_refs'])
- backref_fields = set([u'project_back_refs', u'virtual_ip_back_refs'])
- children_fields = set([u'loadbalancer_members'])
-
- def __init__(self, name = None, parent_obj = None, loadbalancer_pool_properties = None, loadbalancer_pool_provider = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'loadbalancer-pool'
- if not name:
- name = u'default-loadbalancer-pool'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'project'
- self.fq_name = [u'default-domain', u'default-project']
- self.fq_name.append(name)
-
-
- # property fields
- if loadbalancer_pool_properties:
- self._loadbalancer_pool_properties = loadbalancer_pool_properties
- if loadbalancer_pool_provider:
- self._loadbalancer_pool_provider = loadbalancer_pool_provider
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (loadbalancer-pool)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of loadbalancer-pool in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of loadbalancer-pool as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of loadbalancer-pool's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of loadbalancer-pool's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def loadbalancer_pool_properties(self):
- """Get loadbalancer-pool-properties for loadbalancer-pool.
-
- :returns: LoadbalancerPoolType object
-
- """
- return getattr(self, '_loadbalancer_pool_properties', None)
- #end loadbalancer_pool_properties
-
- @loadbalancer_pool_properties.setter
- def loadbalancer_pool_properties(self, loadbalancer_pool_properties):
- """Set loadbalancer-pool-properties for loadbalancer-pool.
-
- :param loadbalancer_pool_properties: LoadbalancerPoolType object
-
- """
- self._loadbalancer_pool_properties = loadbalancer_pool_properties
- #end loadbalancer_pool_properties
-
- def set_loadbalancer_pool_properties(self, value):
- self.loadbalancer_pool_properties = value
- #end set_loadbalancer_pool_properties
-
- def get_loadbalancer_pool_properties(self):
- return self.loadbalancer_pool_properties
- #end get_loadbalancer_pool_properties
-
- @property
- def loadbalancer_pool_provider(self):
- """Get loadbalancer-pool-provider for loadbalancer-pool.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_loadbalancer_pool_provider', None)
- #end loadbalancer_pool_provider
-
- @loadbalancer_pool_provider.setter
- def loadbalancer_pool_provider(self, loadbalancer_pool_provider):
- """Set loadbalancer-pool-provider for loadbalancer-pool.
-
- :param loadbalancer_pool_provider: xsd:string object
-
- """
- self._loadbalancer_pool_provider = loadbalancer_pool_provider
- #end loadbalancer_pool_provider
-
- def set_loadbalancer_pool_provider(self, value):
- self.loadbalancer_pool_provider = value
- #end set_loadbalancer_pool_provider
-
- def get_loadbalancer_pool_provider(self):
- return self.loadbalancer_pool_provider
- #end get_loadbalancer_pool_provider
-
- @property
- def id_perms(self):
- """Get id-perms for loadbalancer-pool.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for loadbalancer-pool.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for loadbalancer-pool.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for loadbalancer-pool.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_loadbalancer_pool_properties'):
- self._serialize_field_to_json(serialized, field_names, 'loadbalancer_pool_properties')
- if hasattr(self, '_loadbalancer_pool_provider'):
- self._serialize_field_to_json(serialized, field_names, 'loadbalancer_pool_provider')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'service_instance_refs'):
- self._serialize_field_to_json(serialized, field_names, 'service_instance_refs')
- if hasattr(self, 'virtual_machine_interface_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_interface_refs')
- if hasattr(self, 'service_appliance_set_refs'):
- self._serialize_field_to_json(serialized, field_names, 'service_appliance_set_refs')
- if hasattr(self, 'loadbalancer_healthmonitor_refs'):
- self._serialize_field_to_json(serialized, field_names, 'loadbalancer_healthmonitor_refs')
- return serialized
- #end serialize_to_json
-
- def get_loadbalancer_members(self):
- return getattr(self, 'loadbalancer_members', None)
- #end get_loadbalancer_members
-
- def set_service_instance(self, ref_obj):
- """Set service-instance for loadbalancer-pool.
-
- :param ref_obj: ServiceInstance object
-
- """
- self.service_instance_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.service_instance_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_service_instance
-
- def add_service_instance(self, ref_obj):
- """Add service-instance to loadbalancer-pool.
-
- :param ref_obj: ServiceInstance object
-
- """
- refs = getattr(self, 'service_instance_refs', [])
- if not refs:
- self.service_instance_refs = []
-
- # if ref already exists, update its attrs in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.service_instance_refs.append(ref_info)
- #end add_service_instance
-
- def del_service_instance(self, ref_obj):
- refs = self.get_service_instance_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.service_instance_refs.remove(ref)
- return
- #end del_service_instance
-
- def set_service_instance_list(self, ref_obj_list):
- """Set service-instance list for loadbalancer-pool.
-
- :param ref_obj_list: list of ServiceInstance object
-
- """
- self.service_instance_refs = ref_obj_list
- #end set_service_instance_list
-
- def get_service_instance_refs(self):
- """Return service-instance list for loadbalancer-pool.
-
- :returns: list of <ServiceInstance>
-
- """
- return getattr(self, 'service_instance_refs', None)
- #end get_service_instance_refs
-
- def set_virtual_machine_interface(self, ref_obj):
- """Set virtual-machine-interface for loadbalancer-pool.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_machine_interface_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, ref_obj):
- """Add virtual-machine-interface to loadbalancer-pool.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- refs = getattr(self, 'virtual_machine_interface_refs', [])
- if not refs:
- self.virtual_machine_interface_refs = []
-
- # if ref already exists, update its attrs in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_machine_interface_refs.append(ref_info)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, ref_obj):
- refs = self.get_virtual_machine_interface_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_machine_interface_refs.remove(ref)
- return
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, ref_obj_list):
- """Set virtual-machine-interface list for loadbalancer-pool.
-
- :param ref_obj_list: list of VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = ref_obj_list
- #end set_virtual_machine_interface_list
-
- def get_virtual_machine_interface_refs(self):
- """Return virtual-machine-interface list for loadbalancer-pool.
-
- :returns: list of <VirtualMachineInterface>
-
- """
- return getattr(self, 'virtual_machine_interface_refs', None)
- #end get_virtual_machine_interface_refs
-
- def set_service_appliance_set(self, ref_obj):
- """Set service-appliance-set for loadbalancer-pool.
-
- :param ref_obj: ServiceApplianceSet object
-
- """
- self.service_appliance_set_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.service_appliance_set_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_service_appliance_set
-
- def add_service_appliance_set(self, ref_obj):
- """Add service-appliance-set to loadbalancer-pool.
-
- :param ref_obj: ServiceApplianceSet object
-
- """
- refs = getattr(self, 'service_appliance_set_refs', [])
- if not refs:
- self.service_appliance_set_refs = []
-
- # if ref already exists, update its attrs in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.service_appliance_set_refs.append(ref_info)
- #end add_service_appliance_set
-
- def del_service_appliance_set(self, ref_obj):
- refs = self.get_service_appliance_set_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.service_appliance_set_refs.remove(ref)
- return
- #end del_service_appliance_set
-
- def set_service_appliance_set_list(self, ref_obj_list):
- """Set service-appliance-set list for loadbalancer-pool.
-
- :param ref_obj_list: list of ServiceApplianceSet object
-
- """
- self.service_appliance_set_refs = ref_obj_list
- #end set_service_appliance_set_list
-
- def get_service_appliance_set_refs(self):
- """Return service-appliance-set list for loadbalancer-pool.
-
- :returns: list of <ServiceApplianceSet>
-
- """
- return getattr(self, 'service_appliance_set_refs', None)
- #end get_service_appliance_set_refs
-
- def set_loadbalancer_healthmonitor(self, ref_obj):
- """Set loadbalancer-healthmonitor for loadbalancer-pool.
-
- :param ref_obj: LoadbalancerHealthmonitor object
-
- """
- self.loadbalancer_healthmonitor_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.loadbalancer_healthmonitor_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_loadbalancer_healthmonitor
-
- def add_loadbalancer_healthmonitor(self, ref_obj):
- """Add loadbalancer-healthmonitor to loadbalancer-pool.
-
- :param ref_obj: LoadbalancerHealthmonitor object
-
- """
- refs = getattr(self, 'loadbalancer_healthmonitor_refs', [])
- if not refs:
- self.loadbalancer_healthmonitor_refs = []
-
- # if ref already exists, update its attrs in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.loadbalancer_healthmonitor_refs.append(ref_info)
- #end add_loadbalancer_healthmonitor
-
- def del_loadbalancer_healthmonitor(self, ref_obj):
- refs = self.get_loadbalancer_healthmonitor_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.loadbalancer_healthmonitor_refs.remove(ref)
- return
- #end del_loadbalancer_healthmonitor
-
- def set_loadbalancer_healthmonitor_list(self, ref_obj_list):
- """Set loadbalancer-healthmonitor list for loadbalancer-pool.
-
- :param ref_obj_list: list of LoadbalancerHealthmonitor object
-
- """
- self.loadbalancer_healthmonitor_refs = ref_obj_list
- #end set_loadbalancer_healthmonitor_list
-
- def get_loadbalancer_healthmonitor_refs(self):
- """Return loadbalancer-healthmonitor list for loadbalancer-pool.
-
- :returns: list of <LoadbalancerHealthmonitor>
-
- """
- return getattr(self, 'loadbalancer_healthmonitor_refs', None)
- #end get_loadbalancer_healthmonitor_refs
-
- def get_project_back_refs(self):
- """Return list of all projects using this loadbalancer-pool"""
- return getattr(self, 'project_back_refs', None)
- #end get_project_back_refs
-
- def get_virtual_ip_back_refs(self):
- """Return list of all virtual-ips using this loadbalancer-pool"""
- return getattr(self, 'virtual_ip_back_refs', None)
- #end get_virtual_ip_back_refs
-
- def dump(self):
- """Display loadbalancer-pool object in compact form."""
- print '------------ loadbalancer-pool ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P loadbalancer_pool_properties = ', self.get_loadbalancer_pool_properties()
- print 'P loadbalancer_pool_provider = ', self.get_loadbalancer_pool_provider()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF service_instance = ', self.get_service_instance_refs()
- print 'REF virtual_machine_interface = ', self.get_virtual_machine_interface_refs()
- print 'REF service_appliance_set = ', self.get_service_appliance_set_refs()
- print 'HAS loadbalancer_member = ', self.get_loadbalancer_members()
- print 'REF loadbalancer_healthmonitor = ', self.get_loadbalancer_healthmonitor_refs()
- print 'BCK virtual_ip = ', self.get_virtual_ip_back_refs()
- #end dump
-
-#end class LoadbalancerPool
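A hypothetical sketch of the removed LoadbalancerPool reference helpers; pool_props and hm stand in for a LoadbalancerPoolType value and a LoadbalancerHealthmonitor object assumed to exist elsewhere, and 'haproxy' is just an example provider string.

    pool = LoadbalancerPool(name='web-pool', loadbalancer_pool_properties=pool_props)
    pool.set_loadbalancer_pool_provider('haproxy')
    pool.add_loadbalancer_healthmonitor(hm)          # stores a {'to': fq_name, 'uuid': ...} ref dict
    print pool.get_loadbalancer_healthmonitor_refs()
    pool.dump()                                      # also lists children (loadbalancer_members) and back-refs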
-
-class VirtualDnsRecord(object):
- """
- Represents virtual-DNS-record configuration.
-
- Child of:
- :class:`.VirtualDns` object
-
- Properties:
- * virtual-DNS-record-data (:class:`.VirtualDnsRecordType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
-
- Referred by:
- """
-
- prop_fields = set([u'virtual_DNS_record_data', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'virtual_DNS_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, virtual_DNS_record_data = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'virtual-DNS-record'
- if not name:
- name = u'default-virtual-DNS-record'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'virtual-DNS'
- self.fq_name = [u'default-domain', u'default-virtual-DNS']
- self.fq_name.append(name)
-
-
- # property fields
- if virtual_DNS_record_data:
- self._virtual_DNS_record_data = virtual_DNS_record_data
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (virtual-DNS-record)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of virtual-DNS-record in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of virtual-DNS-record as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of virtual-DNS-record's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of virtual-DNS-record's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def virtual_DNS_record_data(self):
- """Get virtual-DNS-record-data for virtual-DNS-record.
-
- :returns: VirtualDnsRecordType object
-
- """
- return getattr(self, '_virtual_DNS_record_data', None)
- #end virtual_DNS_record_data
-
- @virtual_DNS_record_data.setter
- def virtual_DNS_record_data(self, virtual_DNS_record_data):
- """Set virtual-DNS-record-data for virtual-DNS-record.
-
- :param virtual_DNS_record_data: VirtualDnsRecordType object
-
- """
- self._virtual_DNS_record_data = virtual_DNS_record_data
- #end virtual_DNS_record_data
-
- def set_virtual_DNS_record_data(self, value):
- self.virtual_DNS_record_data = value
- #end set_virtual_DNS_record_data
-
- def get_virtual_DNS_record_data(self):
- return self.virtual_DNS_record_data
- #end get_virtual_DNS_record_data
-
- @property
- def id_perms(self):
- """Get id-perms for virtual-DNS-record.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for virtual-DNS-record.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for virtual-DNS-record.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for virtual-DNS-record.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_virtual_DNS_record_data'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_DNS_record_data')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_virtual_DNS_back_refs(self):
- """Return list of all virtual-DNSs using this virtual-DNS-record"""
- return getattr(self, 'virtual_DNS_back_refs', None)
- #end get_virtual_DNS_back_refs
-
- def dump(self):
- """Display virtual-DNS-record object in compact form."""
- print '------------ virtual-DNS-record ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P virtual_DNS_record_data = ', self.get_virtual_DNS_record_data()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- #end dump
-
-#end class VirtualDnsRecord
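A short, illustrative sketch of the removed VirtualDnsRecord class; vdns and record_data stand in for an existing VirtualDns parent object and a VirtualDnsRecordType value, neither of which is defined here.

    rec = VirtualDnsRecord(name='www', parent_obj=vdns)        # fq_name becomes the parent's fq_name + ['www']
    rec.set_virtual_DNS_record_data(record_data)
    print rec.get_fq_name_str()
    print rec.serialize_to_json(['virtual_DNS_record_data'])   # uuid plus only the requested field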
-
-class RouteTarget(object):
- """
- Represents route-target configuration.
-
- Properties:
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
-
- Referred by:
- * list of :class:`.LogicalRouter` objects
- * list of :class:`.RoutingInstance` objects
- """
-
- prop_fields = set([u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'logical_router_back_refs', 'routing_instance_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'route-target'
- if not name:
- name = u'default-route-target'
- self.name = name
- self._uuid = None
- self.fq_name = [name]
-
- # property fields
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (route-target)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of route-target in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of route-target as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def id_perms(self):
- """Get id-perms for route-target.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for route-target.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for route-target.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for route-target.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_logical_router_back_refs(self):
- """Return list of all logical-routers using this route-target"""
- return getattr(self, 'logical_router_back_refs', None)
- #end get_logical_router_back_refs
-
- def get_routing_instance_back_refs(self):
- """Return list of all routing-instances using this route-target"""
- return getattr(self, 'routing_instance_back_refs', None)
- #end get_routing_instance_back_refs
-
- def dump(self):
- """Display route-target object in compact form."""
- print '------------ route-target ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'BCK logical_router = ', self.get_logical_router_back_refs()
- print 'BCK routing_instance = ', self.get_routing_instance_back_refs()
- #end dump
-
-#end class RouteTarget
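A minimal sketch of the removed RouteTarget class. RouteTarget takes no parent object, so its fq_name is just the single name; 'target:64512:10000' is an arbitrary example identifier.

    rt = RouteTarget(name='target:64512:10000')
    print rt.get_fq_name()          # ['target:64512:10000'] -- no parent, single-element FQN
    print rt.get_fq_name_str()      # target:64512:10000
    rt.dump()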
-
-class FloatingIp(object):
- """
- Represents floating-ip configuration.
-
- Child of:
- :class:`.FloatingIpPool` object
-
- Properties:
- * floating-ip-address (IpAddressType type)
- * floating-ip-is-virtual-ip (xsd:boolean type)
- * floating-ip-fixed-ip-address (IpAddressType type)
- * floating-ip-address-family (IpAddressFamilyType type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
- * list of :class:`.Project` objects
- * list of :class:`.VirtualMachineInterface` objects
-
- Referred by:
- * list of :class:`.CustomerAttachment` objects
- """
-
- prop_fields = set([u'floating_ip_address', u'floating_ip_is_virtual_ip', u'floating_ip_fixed_ip_address', u'floating_ip_address_family', u'id_perms', u'display_name'])
- ref_fields = set([u'project_refs', 'virtual_machine_interface_refs'])
- backref_fields = set([u'floating_ip_pool_back_refs', 'customer_attachment_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, floating_ip_address = None, floating_ip_is_virtual_ip = None, floating_ip_fixed_ip_address = None, floating_ip_address_family = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'floating-ip'
- if not name:
- name = u'default-floating-ip'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'floating-ip-pool'
- self.fq_name = [u'default-domain', u'default-project', u'default-virtual-network', u'default-floating-ip-pool']
- self.fq_name.append(name)
-
-
- # property fields
- if floating_ip_address:
- self._floating_ip_address = floating_ip_address
- if floating_ip_is_virtual_ip:
- self._floating_ip_is_virtual_ip = floating_ip_is_virtual_ip
- if floating_ip_fixed_ip_address:
- self._floating_ip_fixed_ip_address = floating_ip_fixed_ip_address
- if floating_ip_address_family:
- self._floating_ip_address_family = floating_ip_address_family
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (floating-ip)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of floating-ip in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of floating-ip as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of floating-ip's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of floating-ip's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def floating_ip_address(self):
- """Get floating-ip-address for floating-ip.
-
- :returns: IpAddressType object
-
- """
- return getattr(self, '_floating_ip_address', None)
- #end floating_ip_address
-
- @floating_ip_address.setter
- def floating_ip_address(self, floating_ip_address):
- """Set floating-ip-address for floating-ip.
-
- :param floating_ip_address: IpAddressType object
-
- """
- self._floating_ip_address = floating_ip_address
- #end floating_ip_address
-
- def set_floating_ip_address(self, value):
- self.floating_ip_address = value
- #end set_floating_ip_address
-
- def get_floating_ip_address(self):
- return self.floating_ip_address
- #end get_floating_ip_address
-
- @property
- def floating_ip_is_virtual_ip(self):
- """Get floating-ip-is-virtual-ip for floating-ip.
-
- :returns: xsd:boolean object
-
- """
- return getattr(self, '_floating_ip_is_virtual_ip', None)
- #end floating_ip_is_virtual_ip
-
- @floating_ip_is_virtual_ip.setter
- def floating_ip_is_virtual_ip(self, floating_ip_is_virtual_ip):
- """Set floating-ip-is-virtual-ip for floating-ip.
-
- :param floating_ip_is_virtual_ip: xsd:boolean object
-
- """
- self._floating_ip_is_virtual_ip = floating_ip_is_virtual_ip
- #end floating_ip_is_virtual_ip
-
- def set_floating_ip_is_virtual_ip(self, value):
- self.floating_ip_is_virtual_ip = value
- #end set_floating_ip_is_virtual_ip
-
- def get_floating_ip_is_virtual_ip(self):
- return self.floating_ip_is_virtual_ip
- #end get_floating_ip_is_virtual_ip
-
- @property
- def floating_ip_fixed_ip_address(self):
- """Get floating-ip-fixed-ip-address for floating-ip.
-
- :returns: IpAddressType object
-
- """
- return getattr(self, '_floating_ip_fixed_ip_address', None)
- #end floating_ip_fixed_ip_address
-
- @floating_ip_fixed_ip_address.setter
- def floating_ip_fixed_ip_address(self, floating_ip_fixed_ip_address):
- """Set floating-ip-fixed-ip-address for floating-ip.
-
- :param floating_ip_fixed_ip_address: IpAddressType object
-
- """
- self._floating_ip_fixed_ip_address = floating_ip_fixed_ip_address
- #end floating_ip_fixed_ip_address
-
- def set_floating_ip_fixed_ip_address(self, value):
- self.floating_ip_fixed_ip_address = value
- #end set_floating_ip_fixed_ip_address
-
- def get_floating_ip_fixed_ip_address(self):
- return self.floating_ip_fixed_ip_address
- #end get_floating_ip_fixed_ip_address
-
- @property
- def floating_ip_address_family(self):
- """Get floating-ip-address-family for floating-ip.
-
- :returns: IpAddressFamilyType object
-
- """
- return getattr(self, '_floating_ip_address_family', None)
- #end floating_ip_address_family
-
- @floating_ip_address_family.setter
- def floating_ip_address_family(self, floating_ip_address_family):
- """Set floating-ip-address-family for floating-ip.
-
- :param floating_ip_address_family: IpAddressFamilyType object
-
- """
- self._floating_ip_address_family = floating_ip_address_family
- #end floating_ip_address_family
-
- def set_floating_ip_address_family(self, value):
- self.floating_ip_address_family = value
- #end set_floating_ip_address_family
-
- def get_floating_ip_address_family(self):
- return self.floating_ip_address_family
- #end get_floating_ip_address_family
-
- @property
- def id_perms(self):
- """Get id-perms for floating-ip.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for floating-ip.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for floating-ip.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for floating-ip.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_floating_ip_address'):
- self._serialize_field_to_json(serialized, field_names, 'floating_ip_address')
- if hasattr(self, '_floating_ip_is_virtual_ip'):
- self._serialize_field_to_json(serialized, field_names, 'floating_ip_is_virtual_ip')
- if hasattr(self, '_floating_ip_fixed_ip_address'):
- self._serialize_field_to_json(serialized, field_names, 'floating_ip_fixed_ip_address')
- if hasattr(self, '_floating_ip_address_family'):
- self._serialize_field_to_json(serialized, field_names, 'floating_ip_address_family')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'project_refs'):
- self._serialize_field_to_json(serialized, field_names, 'project_refs')
- if hasattr(self, 'virtual_machine_interface_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_interface_refs')
- return serialized
- #end serialize_to_json
-
- def set_project(self, ref_obj):
- """Set project for floating-ip.
-
- :param ref_obj: Project object
-
- """
- self.project_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.project_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_project
-
- def add_project(self, ref_obj):
- """Add project to floating-ip.
-
- :param ref_obj: Project object
-
- """
- refs = getattr(self, 'project_refs', [])
- if not refs:
- self.project_refs = []
-
- # if ref already exists, update its attrs in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.project_refs.append(ref_info)
- #end add_project
-
- def del_project(self, ref_obj):
- refs = self.get_project_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.project_refs.remove(ref)
- return
- #end del_project
-
- def set_project_list(self, ref_obj_list):
- """Set project list for floating-ip.
-
- :param ref_obj_list: list of Project object
-
- """
- self.project_refs = ref_obj_list
- #end set_project_list
-
- def get_project_refs(self):
- """Return project list for floating-ip.
-
- :returns: list of <Project>
-
- """
- return getattr(self, 'project_refs', None)
- #end get_project_refs
-
- def set_virtual_machine_interface(self, ref_obj):
- """Set virtual-machine-interface for floating-ip.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_machine_interface_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, ref_obj):
- """Add virtual-machine-interface to floating-ip.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- refs = getattr(self, 'virtual_machine_interface_refs', [])
- if not refs:
- self.virtual_machine_interface_refs = []
-
- # if ref already exists, update its attrs in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_machine_interface_refs.append(ref_info)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, ref_obj):
- refs = self.get_virtual_machine_interface_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_machine_interface_refs.remove(ref)
- return
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, ref_obj_list):
- """Set virtual-machine-interface list for floating-ip.
-
- :param ref_obj_list: list of VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = ref_obj_list
- #end set_virtual_machine_interface_list
-
- def get_virtual_machine_interface_refs(self):
- """Return virtual-machine-interface list for floating-ip.
-
- :returns: list of <VirtualMachineInterface>
-
- """
- return getattr(self, 'virtual_machine_interface_refs', None)
- #end get_virtual_machine_interface_refs
-
- def get_floating_ip_pool_back_refs(self):
- """Return list of all floating-ip-pools using this floating-ip"""
- return getattr(self, 'floating_ip_pool_back_refs', None)
- #end get_floating_ip_pool_back_refs
-
- def get_customer_attachment_back_refs(self):
- """Return list of all customer-attachments using this floating-ip"""
- return getattr(self, 'customer_attachment_back_refs', None)
- #end get_customer_attachment_back_refs
-
- def dump(self):
- """Display floating-ip object in compact form."""
- print '------------ floating-ip ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P floating_ip_address = ', self.get_floating_ip_address()
- print 'P floating_ip_is_virtual_ip = ', self.get_floating_ip_is_virtual_ip()
- print 'P floating_ip_fixed_ip_address = ', self.get_floating_ip_fixed_ip_address()
- print 'P floating_ip_address_family = ', self.get_floating_ip_address_family()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF project = ', self.get_project_refs()
- print 'REF virtual_machine_interface = ', self.get_virtual_machine_interface_refs()
- print 'BCK customer_attachment = ', self.get_customer_attachment_back_refs()
- #end dump
-
-#end class FloatingIp
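An illustrative sketch of the removed FloatingIp class; fip_pool, project and vmi stand in for existing FloatingIpPool, Project and VirtualMachineInterface objects, and 203.0.113.10 is a documentation-range address used purely as an example.

    fip = FloatingIp(name='fip-1', parent_obj=fip_pool)
    fip.set_floating_ip_address('203.0.113.10')
    fip.set_project(project)                        # replaces any existing project refs
    fip.add_virtual_machine_interface(vmi)          # appends a VMI ref if not already present
    fip.dump()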
-
-class FloatingIpPool(object):
- """
- Represents floating-ip-pool configuration.
-
- Child of:
- :class:`.VirtualNetwork` object
-
- Properties:
- * floating-ip-pool-prefixes (:class:`.FloatingIpPoolType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
- * list of :class:`.FloatingIp` objects
-
- References to:
-
- Referred by:
- * list of :class:`.Project` objects
- """
-
- prop_fields = set([u'floating_ip_pool_prefixes', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'virtual_network_back_refs', u'project_back_refs'])
- children_fields = set([u'floating_ips'])
-
- def __init__(self, name = None, parent_obj = None, floating_ip_pool_prefixes = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'floating-ip-pool'
- if not name:
- name = u'default-floating-ip-pool'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'virtual-network'
- self.fq_name = [u'default-domain', u'default-project', u'default-virtual-network']
- self.fq_name.append(name)
-
-
- # property fields
- if floating_ip_pool_prefixes:
- self._floating_ip_pool_prefixes = floating_ip_pool_prefixes
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (floating-ip-pool)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of floating-ip-pool in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of floating-ip-pool as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of floating-ip-pool's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of floating-ip-pool's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def floating_ip_pool_prefixes(self):
- """Get floating-ip-pool-prefixes for floating-ip-pool.
-
- :returns: FloatingIpPoolType object
-
- """
- return getattr(self, '_floating_ip_pool_prefixes', None)
- #end floating_ip_pool_prefixes
-
- @floating_ip_pool_prefixes.setter
- def floating_ip_pool_prefixes(self, floating_ip_pool_prefixes):
- """Set floating-ip-pool-prefixes for floating-ip-pool.
-
- :param floating_ip_pool_prefixes: FloatingIpPoolType object
-
- """
- self._floating_ip_pool_prefixes = floating_ip_pool_prefixes
- #end floating_ip_pool_prefixes
-
- def set_floating_ip_pool_prefixes(self, value):
- self.floating_ip_pool_prefixes = value
- #end set_floating_ip_pool_prefixes
-
- def get_floating_ip_pool_prefixes(self):
- return self.floating_ip_pool_prefixes
- #end get_floating_ip_pool_prefixes
-
- @property
- def id_perms(self):
- """Get id-perms for floating-ip-pool.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for floating-ip-pool.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for floating-ip-pool.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for floating-ip-pool.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_floating_ip_pool_prefixes'):
- self._serialize_field_to_json(serialized, field_names, 'floating_ip_pool_prefixes')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_floating_ips(self):
- return getattr(self, 'floating_ips', None)
- #end get_floating_ips
-
- def get_virtual_network_back_refs(self):
- """Return list of all virtual-networks using this floating-ip-pool"""
- return getattr(self, 'virtual_network_back_refs', None)
- #end get_virtual_network_back_refs
-
- def get_project_back_refs(self):
- """Return list of all projects using this floating-ip-pool"""
- return getattr(self, 'project_back_refs', None)
- #end get_project_back_refs
-
- def dump(self):
- """Display floating-ip-pool object in compact form."""
- print '------------ floating-ip-pool ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P floating_ip_pool_prefixes = ', self.get_floating_ip_pool_prefixes()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'HAS floating_ip = ', self.get_floating_ips()
- print 'BCK project = ', self.get_project_back_refs()
- #end dump
-
-#end class FloatingIpPool
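A minimal usage sketch for the generated class above, limited to methods defined in this section; the VirtualNetwork parent is assumed to be the sibling generated class from the same module, and the names are illustrative.

    vn = VirtualNetwork('vn-blue')                       # assumed sibling generated class; supplies the parent fq_name
    pool = FloatingIpPool('public-pool', parent_obj=vn)  # fq_name becomes the parent's fq_name plus 'public-pool'
    pool.set_display_name('Public floating IPs')
    print pool.get_fq_name_str()                         # colon delimited FQN ending in 'public-pool'
    print pool.serialize_to_json()                       # dict with uuid, fq_name, parent_type and display_name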
-
-class PhysicalRouter(object):
- """
- Represents physical-router configuration.
-
- Child of:
- :class:`.GlobalSystemConfig` object
-
- Properties:
- * physical-router-management-ip (IpAddress type)
- * physical-router-dataplane-ip (IpAddress type)
- * physical-router-vendor-name (xsd:string type)
- * physical-router-product-name (xsd:string type)
- * physical-router-vnc-managed (xsd:boolean type)
- * physical-router-user-credentials (:class:`.UserCredentials` type)
- * physical-router-snmp-credentials (:class:`.SNMPCredentials` type)
- * physical-router-junos-service-ports (:class:`.JunosServicePorts` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
- * list of :class:`.PhysicalInterface` objects
- * list of :class:`.LogicalInterface` objects
-
- References to:
- * list of :class:`.VirtualRouter` objects
- * list of :class:`.BgpRouter` objects
- * list of :class:`.VirtualNetwork` objects
-
- Referred by:
- """
-
- prop_fields = set([u'physical_router_management_ip', u'physical_router_dataplane_ip', u'physical_router_vendor_name', u'physical_router_product_name', u'physical_router_vnc_managed', u'physical_router_user_credentials', u'physical_router_snmp_credentials', u'physical_router_junos_service_ports', u'id_perms', u'display_name'])
- ref_fields = set(['virtual_router_refs', 'bgp_router_refs', u'virtual_network_refs'])
- backref_fields = set([u'global_system_config_back_refs'])
- children_fields = set([u'physical_interfaces', u'logical_interfaces'])
-
- def __init__(self, name = None, parent_obj = None, physical_router_management_ip = None, physical_router_dataplane_ip = None, physical_router_vendor_name = None, physical_router_product_name = None, physical_router_vnc_managed = None, physical_router_user_credentials = None, physical_router_snmp_credentials = None, physical_router_junos_service_ports = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'physical-router'
- if not name:
- name = u'default-physical-router'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'global-system-config'
- self.fq_name = [u'default-global-system-config']
- self.fq_name.append(name)
-
-
- # property fields
- if physical_router_management_ip:
- self._physical_router_management_ip = physical_router_management_ip
- if physical_router_dataplane_ip:
- self._physical_router_dataplane_ip = physical_router_dataplane_ip
- if physical_router_vendor_name:
- self._physical_router_vendor_name = physical_router_vendor_name
- if physical_router_product_name:
- self._physical_router_product_name = physical_router_product_name
- if physical_router_vnc_managed:
- self._physical_router_vnc_managed = physical_router_vnc_managed
- if physical_router_user_credentials:
- self._physical_router_user_credentials = physical_router_user_credentials
- if physical_router_snmp_credentials:
- self._physical_router_snmp_credentials = physical_router_snmp_credentials
- if physical_router_junos_service_ports:
- self._physical_router_junos_service_ports = physical_router_junos_service_ports
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (physical-router)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of physical-router in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of physical-router as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of physical-router's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of physical-router's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def physical_router_management_ip(self):
- """Get physical-router-management-ip for physical-router.
-
- :returns: IpAddress object
-
- """
- return getattr(self, '_physical_router_management_ip', None)
- #end physical_router_management_ip
-
- @physical_router_management_ip.setter
- def physical_router_management_ip(self, physical_router_management_ip):
- """Set physical-router-management-ip for physical-router.
-
- :param physical_router_management_ip: IpAddress object
-
- """
- self._physical_router_management_ip = physical_router_management_ip
- #end physical_router_management_ip
-
- def set_physical_router_management_ip(self, value):
- self.physical_router_management_ip = value
- #end set_physical_router_management_ip
-
- def get_physical_router_management_ip(self):
- return self.physical_router_management_ip
- #end get_physical_router_management_ip
-
- @property
- def physical_router_dataplane_ip(self):
- """Get physical-router-dataplane-ip for physical-router.
-
- :returns: IpAddress object
-
- """
- return getattr(self, '_physical_router_dataplane_ip', None)
- #end physical_router_dataplane_ip
-
- @physical_router_dataplane_ip.setter
- def physical_router_dataplane_ip(self, physical_router_dataplane_ip):
- """Set physical-router-dataplane-ip for physical-router.
-
- :param physical_router_dataplane_ip: IpAddress object
-
- """
- self._physical_router_dataplane_ip = physical_router_dataplane_ip
- #end physical_router_dataplane_ip
-
- def set_physical_router_dataplane_ip(self, value):
- self.physical_router_dataplane_ip = value
- #end set_physical_router_dataplane_ip
-
- def get_physical_router_dataplane_ip(self):
- return self.physical_router_dataplane_ip
- #end get_physical_router_dataplane_ip
-
- @property
- def physical_router_vendor_name(self):
- """Get physical-router-vendor-name for physical-router.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_physical_router_vendor_name', None)
- #end physical_router_vendor_name
-
- @physical_router_vendor_name.setter
- def physical_router_vendor_name(self, physical_router_vendor_name):
- """Set physical-router-vendor-name for physical-router.
-
- :param physical_router_vendor_name: xsd:string object
-
- """
- self._physical_router_vendor_name = physical_router_vendor_name
- #end physical_router_vendor_name
-
- def set_physical_router_vendor_name(self, value):
- self.physical_router_vendor_name = value
- #end set_physical_router_vendor_name
-
- def get_physical_router_vendor_name(self):
- return self.physical_router_vendor_name
- #end get_physical_router_vendor_name
-
- @property
- def physical_router_product_name(self):
- """Get physical-router-product-name for physical-router.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_physical_router_product_name', None)
- #end physical_router_product_name
-
- @physical_router_product_name.setter
- def physical_router_product_name(self, physical_router_product_name):
- """Set physical-router-product-name for physical-router.
-
- :param physical_router_product_name: xsd:string object
-
- """
- self._physical_router_product_name = physical_router_product_name
- #end physical_router_product_name
-
- def set_physical_router_product_name(self, value):
- self.physical_router_product_name = value
- #end set_physical_router_product_name
-
- def get_physical_router_product_name(self):
- return self.physical_router_product_name
- #end get_physical_router_product_name
-
- @property
- def physical_router_vnc_managed(self):
- """Get physical-router-vnc-managed for physical-router.
-
- :returns: xsd:boolean object
-
- """
- return getattr(self, '_physical_router_vnc_managed', None)
- #end physical_router_vnc_managed
-
- @physical_router_vnc_managed.setter
- def physical_router_vnc_managed(self, physical_router_vnc_managed):
- """Set physical-router-vnc-managed for physical-router.
-
- :param physical_router_vnc_managed: xsd:boolean object
-
- """
- self._physical_router_vnc_managed = physical_router_vnc_managed
- #end physical_router_vnc_managed
-
- def set_physical_router_vnc_managed(self, value):
- self.physical_router_vnc_managed = value
- #end set_physical_router_vnc_managed
-
- def get_physical_router_vnc_managed(self):
- return self.physical_router_vnc_managed
- #end get_physical_router_vnc_managed
-
- @property
- def physical_router_user_credentials(self):
- """Get physical-router-user-credentials for physical-router.
-
- :returns: UserCredentials object
-
- """
- return getattr(self, '_physical_router_user_credentials', None)
- #end physical_router_user_credentials
-
- @physical_router_user_credentials.setter
- def physical_router_user_credentials(self, physical_router_user_credentials):
- """Set physical-router-user-credentials for physical-router.
-
- :param physical_router_user_credentials: UserCredentials object
-
- """
- self._physical_router_user_credentials = physical_router_user_credentials
- #end physical_router_user_credentials
-
- def set_physical_router_user_credentials(self, value):
- self.physical_router_user_credentials = value
- #end set_physical_router_user_credentials
-
- def get_physical_router_user_credentials(self):
- return self.physical_router_user_credentials
- #end get_physical_router_user_credentials
-
- @property
- def physical_router_snmp_credentials(self):
- """Get physical-router-snmp-credentials for physical-router.
-
- :returns: SNMPCredentials object
-
- """
- return getattr(self, '_physical_router_snmp_credentials', None)
- #end physical_router_snmp_credentials
-
- @physical_router_snmp_credentials.setter
- def physical_router_snmp_credentials(self, physical_router_snmp_credentials):
- """Set physical-router-snmp-credentials for physical-router.
-
- :param physical_router_snmp_credentials: SNMPCredentials object
-
- """
- self._physical_router_snmp_credentials = physical_router_snmp_credentials
- #end physical_router_snmp_credentials
-
- def set_physical_router_snmp_credentials(self, value):
- self.physical_router_snmp_credentials = value
- #end set_physical_router_snmp_credentials
-
- def get_physical_router_snmp_credentials(self):
- return self.physical_router_snmp_credentials
- #end get_physical_router_snmp_credentials
-
- @property
- def physical_router_junos_service_ports(self):
- """Get physical-router-junos-service-ports for physical-router.
-
- :returns: JunosServicePorts object
-
- """
- return getattr(self, '_physical_router_junos_service_ports', None)
- #end physical_router_junos_service_ports
-
- @physical_router_junos_service_ports.setter
- def physical_router_junos_service_ports(self, physical_router_junos_service_ports):
- """Set physical-router-junos-service-ports for physical-router.
-
- :param physical_router_junos_service_ports: JunosServicePorts object
-
- """
- self._physical_router_junos_service_ports = physical_router_junos_service_ports
- #end physical_router_junos_service_ports
-
- def set_physical_router_junos_service_ports(self, value):
- self.physical_router_junos_service_ports = value
- #end set_physical_router_junos_service_ports
-
- def get_physical_router_junos_service_ports(self):
- return self.physical_router_junos_service_ports
- #end get_physical_router_junos_service_ports
-
- @property
- def id_perms(self):
- """Get id-perms for physical-router.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for physical-router.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for physical-router.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for physical-router.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_physical_router_management_ip'):
- self._serialize_field_to_json(serialized, field_names, 'physical_router_management_ip')
- if hasattr(self, '_physical_router_dataplane_ip'):
- self._serialize_field_to_json(serialized, field_names, 'physical_router_dataplane_ip')
- if hasattr(self, '_physical_router_vendor_name'):
- self._serialize_field_to_json(serialized, field_names, 'physical_router_vendor_name')
- if hasattr(self, '_physical_router_product_name'):
- self._serialize_field_to_json(serialized, field_names, 'physical_router_product_name')
- if hasattr(self, '_physical_router_vnc_managed'):
- self._serialize_field_to_json(serialized, field_names, 'physical_router_vnc_managed')
- if hasattr(self, '_physical_router_user_credentials'):
- self._serialize_field_to_json(serialized, field_names, 'physical_router_user_credentials')
- if hasattr(self, '_physical_router_snmp_credentials'):
- self._serialize_field_to_json(serialized, field_names, 'physical_router_snmp_credentials')
- if hasattr(self, '_physical_router_junos_service_ports'):
- self._serialize_field_to_json(serialized, field_names, 'physical_router_junos_service_ports')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'virtual_router_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_router_refs')
- if hasattr(self, 'bgp_router_refs'):
- self._serialize_field_to_json(serialized, field_names, 'bgp_router_refs')
- if hasattr(self, 'virtual_network_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_network_refs')
- return serialized
- #end serialize_to_json
-
- def get_physical_interfaces(self):
- return getattr(self, 'physical_interfaces', None)
- #end get_physical_interfaces
-
- def get_logical_interfaces(self):
- return getattr(self, 'logical_interfaces', None)
- #end get_logical_interfaces
-
- def set_virtual_router(self, ref_obj):
- """Set virtual-router for physical-router.
-
- :param ref_obj: VirtualRouter object
-
- """
- self.virtual_router_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_router_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_router
-
- def add_virtual_router(self, ref_obj):
- """Add virtual-router to physical-router.
-
- :param ref_obj: VirtualRouter object
-
- """
- refs = getattr(self, 'virtual_router_refs', [])
- if not refs:
- self.virtual_router_refs = []
-
- # if ref already exists, update its uuid in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_router_refs.append(ref_info)
- #end add_virtual_router
-
- def del_virtual_router(self, ref_obj):
- refs = self.get_virtual_router_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_router_refs.remove(ref)
- return
- #end del_virtual_router
-
- def set_virtual_router_list(self, ref_obj_list):
- """Set virtual-router list for physical-router.
-
- :param ref_obj_list: list of VirtualRouter object
-
- """
- self.virtual_router_refs = ref_obj_list
- #end set_virtual_router_list
-
- def get_virtual_router_refs(self):
- """Return virtual-router list for physical-router.
-
- :returns: list of <VirtualRouter>
-
- """
- return getattr(self, 'virtual_router_refs', None)
- #end get_virtual_router_refs
-
- def set_bgp_router(self, ref_obj):
- """Set bgp-router for physical-router.
-
- :param ref_obj: BgpRouter object
-
- """
- self.bgp_router_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.bgp_router_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_bgp_router
-
- def add_bgp_router(self, ref_obj):
- """Add bgp-router to physical-router.
-
- :param ref_obj: BgpRouter object
-
- """
- refs = getattr(self, 'bgp_router_refs', [])
- if not refs:
- self.bgp_router_refs = []
-
- # if ref already exists, update its uuid in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.bgp_router_refs.append(ref_info)
- #end add_bgp_router
-
- def del_bgp_router(self, ref_obj):
- refs = self.get_bgp_router_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.bgp_router_refs.remove(ref)
- return
- #end del_bgp_router
-
- def set_bgp_router_list(self, ref_obj_list):
- """Set bgp-router list for physical-router.
-
- :param ref_obj_list: list of BgpRouter object
-
- """
- self.bgp_router_refs = ref_obj_list
- #end set_bgp_router_list
-
- def get_bgp_router_refs(self):
- """Return bgp-router list for physical-router.
-
- :returns: list of <BgpRouter>
-
- """
- return getattr(self, 'bgp_router_refs', None)
- #end get_bgp_router_refs
-
- def set_virtual_network(self, ref_obj):
- """Set virtual-network for physical-router.
-
- :param ref_obj: VirtualNetwork object
-
- """
- self.virtual_network_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_network_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_network
-
- def add_virtual_network(self, ref_obj):
- """Add virtual-network to physical-router.
-
- :param ref_obj: VirtualNetwork object
-
- """
- refs = getattr(self, 'virtual_network_refs', [])
- if not refs:
- self.virtual_network_refs = []
-
- # if ref already exists, update its uuid in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_network_refs.append(ref_info)
- #end add_virtual_network
-
- def del_virtual_network(self, ref_obj):
- refs = self.get_virtual_network_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_network_refs.remove(ref)
- return
- #end del_virtual_network
-
- def set_virtual_network_list(self, ref_obj_list):
- """Set virtual-network list for physical-router.
-
- :param ref_obj_list: list of VirtualNetwork object
-
- """
- self.virtual_network_refs = ref_obj_list
- #end set_virtual_network_list
-
- def get_virtual_network_refs(self):
- """Return virtual-network list for physical-router.
-
- :returns: list of <VirtualNetwork>
-
- """
- return getattr(self, 'virtual_network_refs', None)
- #end get_virtual_network_refs
-
- def get_global_system_config_back_refs(self):
- """Return list of all global-system-configs using this physical-router"""
- return getattr(self, 'global_system_config_back_refs', None)
- #end get_global_system_config_back_refs
-
- def dump(self):
- """Display physical-router object in compact form."""
- print '------------ physical-router ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P physical_router_management_ip = ', self.get_physical_router_management_ip()
- print 'P physical_router_dataplane_ip = ', self.get_physical_router_dataplane_ip()
- print 'P physical_router_vendor_name = ', self.get_physical_router_vendor_name()
- print 'P physical_router_product_name = ', self.get_physical_router_product_name()
- print 'P physical_router_vnc_managed = ', self.get_physical_router_vnc_managed()
- print 'P physical_router_user_credentials = ', self.get_physical_router_user_credentials()
- print 'P physical_router_snmp_credentials = ', self.get_physical_router_snmp_credentials()
- print 'P physical_router_junos_service_ports = ', self.get_physical_router_junos_service_ports()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF virtual_router = ', self.get_virtual_router_refs()
- print 'REF bgp_router = ', self.get_bgp_router_refs()
- print 'REF virtual_network = ', self.get_virtual_network_refs()
- print 'HAS physical_interface = ', self.get_physical_interfaces()
- print 'HAS logical_interface = ', self.get_logical_interfaces()
- #end dump
-
-#end class PhysicalRouter
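A quick orientation to the reference helpers above, using only this class and the BgpRouter class defined later in this file; the IP and vendor strings are illustrative values.

    pr = PhysicalRouter('mx-1')                      # defaults under default-global-system-config
    pr.set_physical_router_management_ip('10.0.0.1')
    pr.set_physical_router_vendor_name('juniper')
    pr.add_bgp_router(BgpRouter('mx-1-bgp'))         # appends {'to': <bgp fq_name>} to bgp_router_refs
    print pr.get_bgp_router_refs()
    pr.dump()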
-
-class BgpRouter(object):
- """
- Represents bgp-router configuration.
-
- Child of:
- :class:`.RoutingInstance` object
-
- Properties:
- * bgp-router-parameters (:class:`.BgpRouterParams` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
- * list of (:class:`.BgpRouter` object, :class:`.BgpPeeringAttributes` attribute)
-
- Referred by:
- * list of :class:`.GlobalSystemConfig` objects
- * list of :class:`.PhysicalRouter` objects
- * list of :class:`.VirtualRouter` objects
- * list of :class:`.BgpRouter` objects
- """
-
- prop_fields = set([u'bgp_router_parameters', u'id_perms', u'display_name'])
- ref_fields = set(['bgp_router_refs'])
- backref_fields = set([u'global_system_config_back_refs', u'physical_router_back_refs', 'virtual_router_back_refs', 'routing_instance_back_refs', 'bgp_router_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, bgp_router_parameters = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'bgp-router'
- if not name:
- name = u'default-bgp-router'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'routing-instance'
- self.fq_name = [u'default-domain', u'default-project', u'default-virtual-network', 'default-routing-instance']
- self.fq_name.append(name)
-
-
- # property fields
- if bgp_router_parameters:
- self._bgp_router_parameters = bgp_router_parameters
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (bgp-router)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of bgp-router in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of bgp-router as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of bgp-router's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of bgp-router's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def bgp_router_parameters(self):
- """Get bgp-router-parameters for bgp-router.
-
- :returns: BgpRouterParams object
-
- """
- return getattr(self, '_bgp_router_parameters', None)
- #end bgp_router_parameters
-
- @bgp_router_parameters.setter
- def bgp_router_parameters(self, bgp_router_parameters):
- """Set bgp-router-parameters for bgp-router.
-
- :param bgp_router_parameters: BgpRouterParams object
-
- """
- self._bgp_router_parameters = bgp_router_parameters
- #end bgp_router_parameters
-
- def set_bgp_router_parameters(self, value):
- self.bgp_router_parameters = value
- #end set_bgp_router_parameters
-
- def get_bgp_router_parameters(self):
- return self.bgp_router_parameters
- #end get_bgp_router_parameters
-
- @property
- def id_perms(self):
- """Get id-perms for bgp-router.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for bgp-router.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for bgp-router.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for bgp-router.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_bgp_router_parameters'):
- self._serialize_field_to_json(serialized, field_names, 'bgp_router_parameters')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'bgp_router_refs'):
- self._serialize_field_to_json(serialized, field_names, 'bgp_router_refs')
- return serialized
- #end serialize_to_json
-
- def set_bgp_router(self, ref_obj, ref_data):
- """Set bgp-router for bgp-router.
-
- :param ref_obj: BgpRouter object
- :param ref_data: BgpPeeringAttributes object
-
- """
- self.bgp_router_refs = [{'to':ref_obj.get_fq_name(), 'attr':ref_data}]
- if ref_obj.uuid:
- self.bgp_router_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_bgp_router
-
- def add_bgp_router(self, ref_obj, ref_data):
- """Add bgp-router to bgp-router.
-
- :param ref_obj: BgpRouter object
- :param ref_data: BgpPeeringAttributes object
-
- """
- refs = getattr(self, 'bgp_router_refs', [])
- if not refs:
- self.bgp_router_refs = []
-
- # if ref already exists, update its attr and uuid in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- ref['attr'] = ref_data
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name(), 'attr':ref_data}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.bgp_router_refs.append(ref_info)
- #end add_bgp_router
-
- def del_bgp_router(self, ref_obj):
- refs = self.get_bgp_router_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.bgp_router_refs.remove(ref)
- return
- #end del_bgp_router
-
- def set_bgp_router_list(self, ref_obj_list, ref_data_list):
- """Set bgp-router list for bgp-router.
-
- :param ref_obj_list: list of BgpRouter object
- :param ref_data_list: list of BgpPeeringAttributes object
-
- """
- self.bgp_router_refs = [{'to':ref_obj_list[i], 'attr':ref_data_list[i]} for i in range(len(ref_obj_list))]
- #end set_bgp_router_list
-
- def get_bgp_router_refs(self):
- """Return bgp-router list for bgp-router.
-
- :returns: list of tuple <BgpRouter, BgpPeeringAttributes>
-
- """
- return getattr(self, 'bgp_router_refs', None)
- #end get_bgp_router_refs
-
- def get_global_system_config_back_refs(self):
- """Return list of all global-system-configs using this bgp-router"""
- return getattr(self, 'global_system_config_back_refs', None)
- #end get_global_system_config_back_refs
-
- def get_physical_router_back_refs(self):
- """Return list of all physical-routers using this bgp-router"""
- return getattr(self, 'physical_router_back_refs', None)
- #end get_physical_router_back_refs
-
- def get_virtual_router_back_refs(self):
- """Return list of all virtual-routers using this bgp-router"""
- return getattr(self, 'virtual_router_back_refs', None)
- #end get_virtual_router_back_refs
-
- def get_routing_instance_back_refs(self):
- """Return list of all routing-instances using this bgp-router"""
- return getattr(self, 'routing_instance_back_refs', None)
- #end get_routing_instance_back_refs
-
- def get_bgp_router_back_refs(self):
- """Return list of all bgp-routers using this bgp-router"""
- return getattr(self, 'bgp_router_back_refs', None)
- #end get_bgp_router_back_refs
-
- def dump(self):
- """Display bgp-router object in compact form."""
- print '------------ bgp-router ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P bgp_router_parameters = ', self.get_bgp_router_parameters()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF bgp_router = ', self.get_bgp_router_refs()
- print 'BCK global_system_config = ', self.get_global_system_config_back_refs()
- print 'BCK physical_router = ', self.get_physical_router_back_refs()
- print 'BCK virtual_router = ', self.get_virtual_router_back_refs()
- print 'BCK bgp_router = ', self.get_bgp_router_back_refs()
- #end dump
-
-#end class BgpRouter
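Unlike the plain references above, the bgp-router to bgp-router reference carries a BgpPeeringAttributes attribute, so set/add take both the peer object and the attribute. A sketch under the assumption that BgpPeeringAttributes (the generated type named in the docstring) can be constructed without arguments:

    r1 = BgpRouter('router-a')
    r2 = BgpRouter('router-b')
    peering = BgpPeeringAttributes()                 # assumed default constructor
    r1.add_bgp_router(r2, peering)                   # stores {'to': r2 fq_name, 'attr': peering}
    r2.add_bgp_router(r1, peering)
    print r1.serialize_to_json(['bgp_router_refs'])  # only uuid and bgp_router_refs are emitted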
-
-class VirtualRouter(object):
- """
- Represents virtual-router configuration.
-
- Child of:
- :class:`.GlobalSystemConfig` object
-
- Properties:
- * virtual-router-type (VirtualRouterType type)
- * virtual-router-ip-address (IpAddressType type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
- * list of :class:`.BgpRouter` objects
- * list of :class:`.VirtualMachine` objects
-
- Referred by:
- * list of :class:`.PhysicalRouter` objects
- * list of :class:`.ProviderAttachment` objects
- """
-
- prop_fields = set([u'virtual_router_type', u'virtual_router_ip_address', u'id_perms', u'display_name'])
- ref_fields = set(['bgp_router_refs', u'virtual_machine_refs'])
- backref_fields = set([u'physical_router_back_refs', u'global_system_config_back_refs', 'provider_attachment_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, virtual_router_type = None, virtual_router_ip_address = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'virtual-router'
- if not name:
- name = u'default-virtual-router'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'global-system-config'
- self.fq_name = [u'default-global-system-config']
- self.fq_name.append(name)
-
-
- # property fields
- if virtual_router_type:
- self._virtual_router_type = virtual_router_type
- if virtual_router_ip_address:
- self._virtual_router_ip_address = virtual_router_ip_address
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (virtual-router)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of virtual-router in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of virtual-router as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of virtual-router's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of virtual-router's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def virtual_router_type(self):
- """Get virtual-router-type for virtual-router.
-
- :returns: VirtualRouterType object
-
- """
- return getattr(self, '_virtual_router_type', None)
- #end virtual_router_type
-
- @virtual_router_type.setter
- def virtual_router_type(self, virtual_router_type):
- """Set virtual-router-type for virtual-router.
-
- :param virtual_router_type: VirtualRouterType object
-
- """
- self._virtual_router_type = virtual_router_type
- #end virtual_router_type
-
- def set_virtual_router_type(self, value):
- self.virtual_router_type = value
- #end set_virtual_router_type
-
- def get_virtual_router_type(self):
- return self.virtual_router_type
- #end get_virtual_router_type
-
- @property
- def virtual_router_ip_address(self):
- """Get virtual-router-ip-address for virtual-router.
-
- :returns: IpAddressType object
-
- """
- return getattr(self, '_virtual_router_ip_address', None)
- #end virtual_router_ip_address
-
- @virtual_router_ip_address.setter
- def virtual_router_ip_address(self, virtual_router_ip_address):
- """Set virtual-router-ip-address for virtual-router.
-
- :param virtual_router_ip_address: IpAddressType object
-
- """
- self._virtual_router_ip_address = virtual_router_ip_address
- #end virtual_router_ip_address
-
- def set_virtual_router_ip_address(self, value):
- self.virtual_router_ip_address = value
- #end set_virtual_router_ip_address
-
- def get_virtual_router_ip_address(self):
- return self.virtual_router_ip_address
- #end get_virtual_router_ip_address
-
- @property
- def id_perms(self):
- """Get id-perms for virtual-router.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for virtual-router.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for virtual-router.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for virtual-router.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_virtual_router_type'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_router_type')
- if hasattr(self, '_virtual_router_ip_address'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_router_ip_address')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'bgp_router_refs'):
- self._serialize_field_to_json(serialized, field_names, 'bgp_router_refs')
- if hasattr(self, 'virtual_machine_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_refs')
- return serialized
- #end serialize_to_json
-
- def set_bgp_router(self, ref_obj):
- """Set bgp-router for virtual-router.
-
- :param ref_obj: BgpRouter object
-
- """
- self.bgp_router_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.bgp_router_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_bgp_router
-
- def add_bgp_router(self, ref_obj):
- """Add bgp-router to virtual-router.
-
- :param ref_obj: BgpRouter object
-
- """
- refs = getattr(self, 'bgp_router_refs', [])
- if not refs:
- self.bgp_router_refs = []
-
- # if ref already exists, update its uuid in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.bgp_router_refs.append(ref_info)
- #end add_bgp_router
-
- def del_bgp_router(self, ref_obj):
- refs = self.get_bgp_router_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.bgp_router_refs.remove(ref)
- return
- #end del_bgp_router
-
- def set_bgp_router_list(self, ref_obj_list):
- """Set bgp-router list for virtual-router.
-
- :param ref_obj_list: list of BgpRouter object
-
- """
- self.bgp_router_refs = ref_obj_list
- #end set_bgp_router_list
-
- def get_bgp_router_refs(self):
- """Return bgp-router list for virtual-router.
-
- :returns: list of <BgpRouter>
-
- """
- return getattr(self, 'bgp_router_refs', None)
- #end get_bgp_router_refs
-
- def set_virtual_machine(self, ref_obj):
- """Set virtual-machine for virtual-router.
-
- :param ref_obj: VirtualMachine object
-
- """
- self.virtual_machine_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_machine_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_machine
-
- def add_virtual_machine(self, ref_obj):
- """Add virtual-machine to virtual-router.
-
- :param ref_obj: VirtualMachine object
-
- """
- refs = getattr(self, 'virtual_machine_refs', [])
- if not refs:
- self.virtual_machine_refs = []
-
- # if ref already exists, update its uuid in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_machine_refs.append(ref_info)
- #end add_virtual_machine
-
- def del_virtual_machine(self, ref_obj):
- refs = self.get_virtual_machine_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_machine_refs.remove(ref)
- return
- #end del_virtual_machine
-
- def set_virtual_machine_list(self, ref_obj_list):
- """Set virtual-machine list for virtual-router.
-
- :param ref_obj_list: list of VirtualMachine object
-
- """
- self.virtual_machine_refs = ref_obj_list
- #end set_virtual_machine_list
-
- def get_virtual_machine_refs(self):
- """Return virtual-machine list for virtual-router.
-
- :returns: list of <VirtualMachine>
-
- """
- return getattr(self, 'virtual_machine_refs', None)
- #end get_virtual_machine_refs
-
- def get_physical_router_back_refs(self):
- """Return list of all physical-routers using this virtual-router"""
- return getattr(self, 'physical_router_back_refs', None)
- #end get_physical_router_back_refs
-
- def get_global_system_config_back_refs(self):
- """Return list of all global-system-configs using this virtual-router"""
- return getattr(self, 'global_system_config_back_refs', None)
- #end get_global_system_config_back_refs
-
- def get_provider_attachment_back_refs(self):
- """Return list of all provider-attachments using this virtual-router"""
- return getattr(self, 'provider_attachment_back_refs', None)
- #end get_provider_attachment_back_refs
-
- def dump(self):
- """Display virtual-router object in compact form."""
- print '------------ virtual-router ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P virtual_router_type = ', self.get_virtual_router_type()
- print 'P virtual_router_ip_address = ', self.get_virtual_router_ip_address()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF bgp_router = ', self.get_bgp_router_refs()
- print 'REF virtual_machine = ', self.get_virtual_machine_refs()
- print 'BCK physical_router = ', self.get_physical_router_back_refs()
- print 'BCK provider_attachment = ', self.get_provider_attachment_back_refs()
- #end dump
-
-#end class VirtualRouter
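A corresponding sketch for virtual-router, again limited to methods defined in this class; the BgpRouter peer comes from the class above and the address is an illustrative value.

    vr = VirtualRouter('compute-1')                  # defaults under default-global-system-config
    vr.set_virtual_router_ip_address('192.168.0.10')
    vr.add_bgp_router(BgpRouter('compute-1-bgp'))
    print vr.get_parent_fq_name_str()                # default-global-system-config
    print vr.serialize_to_json()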
-
-class ConfigRoot(object):
- """
- Represents config-root configuration.
-
- Properties:
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
- * list of :class:`.GlobalSystemConfig` objects
- * list of :class:`.Domain` objects
-
- References to:
-
- Referred by:
- """
-
- prop_fields = set([u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([])
- children_fields = set([u'global_system_configs', u'domains'])
-
- def __init__(self, name = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'config-root'
- if not name:
- name = u'default-config-root'
- self.name = name
- self._uuid = None
- self.fq_name = [name]
-
- # property fields
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (config-root)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of config-root in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of config-root as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def id_perms(self):
- """Get id-perms for config-root.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for config-root.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for config-root.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for config-root.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_global_system_configs(self):
- return getattr(self, 'global_system_configs', None)
- #end get_global_system_configs
-
- def get_domains(self):
- return getattr(self, 'domains', None)
- #end get_domains
-
- def dump(self):
- """Display config-root object in compact form."""
- print '------------ config-root ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'HAS global_system_config = ', self.get_global_system_configs()
- print 'HAS domain = ', self.get_domains()
- #end dump
-
-#end class ConfigRoot
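config-root has no parent, so its fq_name is just its own name; a two-line sketch of the accessors above:

    root = ConfigRoot()             # name defaults to u'default-config-root'
    print root.get_fq_name()        # [u'default-config-root']
    print root.get_domains()        # None until child objects are attached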
-
-class Subnet(object):
- """
- Represents subnet configuration.
-
- Properties:
- * subnet-ip-prefix (:class:`.SubnetType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
- * list of :class:`.VirtualMachineInterface` objects
-
- Referred by:
- """
-
- prop_fields = set([u'subnet_ip_prefix', u'id_perms', u'display_name'])
- ref_fields = set(['virtual_machine_interface_refs'])
- backref_fields = set([])
- children_fields = set([])
-
- def __init__(self, name = None, subnet_ip_prefix = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'subnet'
- if not name:
- name = u'default-subnet'
- self.name = name
- self._uuid = None
- self.fq_name = [name]
-
- # property fields
- if subnet_ip_prefix:
- self._subnet_ip_prefix = subnet_ip_prefix
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (subnet)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of subnet in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of subnet as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def subnet_ip_prefix(self):
- """Get subnet-ip-prefix for subnet.
-
- :returns: SubnetType object
-
- """
- return getattr(self, '_subnet_ip_prefix', None)
- #end subnet_ip_prefix
-
- @subnet_ip_prefix.setter
- def subnet_ip_prefix(self, subnet_ip_prefix):
- """Set subnet-ip-prefix for subnet.
-
- :param subnet_ip_prefix: SubnetType object
-
- """
- self._subnet_ip_prefix = subnet_ip_prefix
- #end subnet_ip_prefix
-
- def set_subnet_ip_prefix(self, value):
- self.subnet_ip_prefix = value
- #end set_subnet_ip_prefix
-
- def get_subnet_ip_prefix(self):
- return self.subnet_ip_prefix
- #end get_subnet_ip_prefix
-
- @property
- def id_perms(self):
- """Get id-perms for subnet.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for subnet.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for subnet.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for subnet.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_subnet_ip_prefix'):
- self._serialize_field_to_json(serialized, field_names, 'subnet_ip_prefix')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'virtual_machine_interface_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_interface_refs')
- return serialized
- #end serialize_to_json
-
- def set_virtual_machine_interface(self, ref_obj):
- """Set virtual-machine-interface for subnet.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_machine_interface_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, ref_obj):
- """Add virtual-machine-interface to subnet.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- refs = getattr(self, 'virtual_machine_interface_refs', [])
- if not refs:
- self.virtual_machine_interface_refs = []
-
- # if ref already exists, update its uuid in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_machine_interface_refs.append(ref_info)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, ref_obj):
- refs = self.get_virtual_machine_interface_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_machine_interface_refs.remove(ref)
- return
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, ref_obj_list):
- """Set virtual-machine-interface list for subnet.
-
- :param ref_obj_list: list of VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = ref_obj_list
- #end set_virtual_machine_interface_list
-
- def get_virtual_machine_interface_refs(self):
- """Return virtual-machine-interface list for subnet.
-
- :returns: list of <VirtualMachineInterface>
-
- """
- return getattr(self, 'virtual_machine_interface_refs', None)
- #end get_virtual_machine_interface_refs
-
- def dump(self):
- """Display subnet object in compact form."""
- print '------------ subnet ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- print 'P subnet_ip_prefix = ', self.get_subnet_ip_prefix()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF virtual_machine_interface = ', self.get_virtual_machine_interface_refs()
- #end dump
-
-#end class Subnet
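
A minimal usage sketch for the ref helpers and serialize_to_json() above (illustrative only, not part of the removed file). It assumes Subnet's generated constructor follows the same name/parent pattern as the other classes in this module; _StubVMI is a hypothetical stand-in for VirtualMachineInterface, exposing only the two attributes the ref helpers actually read.

class _StubVMI(object):
    # stand-in: add/del ref helpers only read get_fq_name() and uuid
    def __init__(self, fq_name, uuid=None):
        self._fq_name = fq_name
        self.uuid = uuid
    def get_fq_name(self):
        return self._fq_name

subnet = Subnet('demo-subnet')                     # assumed generated constructor
vmi = _StubVMI(['default-domain', 'default-project', 'demo-vmi'], uuid='vmi-1')

subnet.add_virtual_machine_interface(vmi)          # creates the ref list and appends
assert subnet.get_virtual_machine_interface_refs()[0]['uuid'] == 'vmi-1'

# field_names filters property/ref fields; uuid is always considered
data = subnet.serialize_to_json(field_names=['virtual_machine_interface_refs'])
assert 'virtual_machine_interface_refs' in data

subnet.del_virtual_machine_interface(vmi)
assert subnet.get_virtual_machine_interface_refs() == []
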
-
-class GlobalSystemConfig(object):
- """
- Represents global-system-config configuration.
-
- Child of:
- :class:`.ConfigRoot` object OR
-
- Properties:
- * autonomous-system (AutonomousSystemType type)
- * config-version (xsd:string type)
- * plugin-tuning (:class:`.PluginProperties` type)
- * ibgp-auto-mesh (xsd:boolean type)
- * ip-fabric-subnets (:class:`.SubnetListType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
- * list of :class:`.GlobalVrouterConfig` objects
- * list of :class:`.PhysicalRouter` objects
- * list of :class:`.VirtualRouter` objects
- * list of :class:`.ConfigNode` objects
- * list of :class:`.AnalyticsNode` objects
- * list of :class:`.DatabaseNode` objects
- * list of :class:`.ServiceApplianceSet` objects
-
- References to:
- * list of :class:`.BgpRouter` objects
-
- Referred by:
- """
-
- prop_fields = set([u'autonomous_system', u'config_version', u'plugin_tuning', u'ibgp_auto_mesh', u'ip_fabric_subnets', u'id_perms', u'display_name'])
- ref_fields = set(['bgp_router_refs'])
- backref_fields = set([u'config_root_back_refs'])
- children_fields = set([u'global_vrouter_configs', u'physical_routers', 'virtual_routers', u'config_nodes', u'analytics_nodes', u'database_nodes', u'service_appliance_sets'])
-
- def __init__(self, name = None, parent_obj = None, autonomous_system = None, config_version = None, plugin_tuning = None, ibgp_auto_mesh = None, ip_fabric_subnets = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'global-system-config'
- if not name:
- name = u'default-global-system-config'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.fq_name = [name]
-
- # property fields
- if autonomous_system:
- self._autonomous_system = autonomous_system
- if config_version:
- self._config_version = config_version
- if plugin_tuning:
- self._plugin_tuning = plugin_tuning
- if ibgp_auto_mesh:
- self._ibgp_auto_mesh = ibgp_auto_mesh
- if ip_fabric_subnets:
- self._ip_fabric_subnets = ip_fabric_subnets
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (global-system-config)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of global-system-config in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of global-system-config as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of global-system-config's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of global-system-config's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def autonomous_system(self):
- """Get autonomous-system for global-system-config.
-
- :returns: AutonomousSystemType object
-
- """
- return getattr(self, '_autonomous_system', None)
- #end autonomous_system
-
- @autonomous_system.setter
- def autonomous_system(self, autonomous_system):
- """Set autonomous-system for global-system-config.
-
- :param autonomous_system: AutonomousSystemType object
-
- """
- self._autonomous_system = autonomous_system
- #end autonomous_system
-
- def set_autonomous_system(self, value):
- self.autonomous_system = value
- #end set_autonomous_system
-
- def get_autonomous_system(self):
- return self.autonomous_system
- #end get_autonomous_system
-
- @property
- def config_version(self):
- """Get config-version for global-system-config.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_config_version', None)
- #end config_version
-
- @config_version.setter
- def config_version(self, config_version):
- """Set config-version for global-system-config.
-
- :param config_version: xsd:string object
-
- """
- self._config_version = config_version
- #end config_version
-
- def set_config_version(self, value):
- self.config_version = value
- #end set_config_version
-
- def get_config_version(self):
- return self.config_version
- #end get_config_version
-
- @property
- def plugin_tuning(self):
- """Get plugin-tuning for global-system-config.
-
- :returns: PluginProperties object
-
- """
- return getattr(self, '_plugin_tuning', None)
- #end plugin_tuning
-
- @plugin_tuning.setter
- def plugin_tuning(self, plugin_tuning):
- """Set plugin-tuning for global-system-config.
-
- :param plugin_tuning: PluginProperties object
-
- """
- self._plugin_tuning = plugin_tuning
- #end plugin_tuning
-
- def set_plugin_tuning(self, value):
- self.plugin_tuning = value
- #end set_plugin_tuning
-
- def get_plugin_tuning(self):
- return self.plugin_tuning
- #end get_plugin_tuning
-
- @property
- def ibgp_auto_mesh(self):
- """Get ibgp-auto-mesh for global-system-config.
-
- :returns: xsd:boolean object
-
- """
- return getattr(self, '_ibgp_auto_mesh', None)
- #end ibgp_auto_mesh
-
- @ibgp_auto_mesh.setter
- def ibgp_auto_mesh(self, ibgp_auto_mesh):
- """Set ibgp-auto-mesh for global-system-config.
-
- :param ibgp_auto_mesh: xsd:boolean object
-
- """
- self._ibgp_auto_mesh = ibgp_auto_mesh
- #end ibgp_auto_mesh
-
- def set_ibgp_auto_mesh(self, value):
- self.ibgp_auto_mesh = value
- #end set_ibgp_auto_mesh
-
- def get_ibgp_auto_mesh(self):
- return self.ibgp_auto_mesh
- #end get_ibgp_auto_mesh
-
- @property
- def ip_fabric_subnets(self):
- """Get ip-fabric-subnets for global-system-config.
-
- :returns: SubnetListType object
-
- """
- return getattr(self, '_ip_fabric_subnets', None)
- #end ip_fabric_subnets
-
- @ip_fabric_subnets.setter
- def ip_fabric_subnets(self, ip_fabric_subnets):
- """Set ip-fabric-subnets for global-system-config.
-
- :param ip_fabric_subnets: SubnetListType object
-
- """
- self._ip_fabric_subnets = ip_fabric_subnets
- #end ip_fabric_subnets
-
- def set_ip_fabric_subnets(self, value):
- self.ip_fabric_subnets = value
- #end set_ip_fabric_subnets
-
- def get_ip_fabric_subnets(self):
- return self.ip_fabric_subnets
- #end get_ip_fabric_subnets
-
- @property
- def id_perms(self):
- """Get id-perms for global-system-config.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for global-system-config.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for global-system-config.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for global-system-config.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_autonomous_system'):
- self._serialize_field_to_json(serialized, field_names, 'autonomous_system')
- if hasattr(self, '_config_version'):
- self._serialize_field_to_json(serialized, field_names, 'config_version')
- if hasattr(self, '_plugin_tuning'):
- self._serialize_field_to_json(serialized, field_names, 'plugin_tuning')
- if hasattr(self, '_ibgp_auto_mesh'):
- self._serialize_field_to_json(serialized, field_names, 'ibgp_auto_mesh')
- if hasattr(self, '_ip_fabric_subnets'):
- self._serialize_field_to_json(serialized, field_names, 'ip_fabric_subnets')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'bgp_router_refs'):
- self._serialize_field_to_json(serialized, field_names, 'bgp_router_refs')
- return serialized
- #end serialize_to_json
-
- def get_global_vrouter_configs(self):
- return getattr(self, 'global_vrouter_configs', None)
- #end get_global_vrouter_configs
-
- def get_physical_routers(self):
- return getattr(self, 'physical_routers', None)
- #end get_physical_routers
-
- def get_virtual_routers(self):
- return getattr(self, 'virtual_routers', None)
- #end get_virtual_routers
-
- def get_config_nodes(self):
- return getattr(self, 'config_nodes', None)
- #end get_config_nodes
-
- def get_analytics_nodes(self):
- return getattr(self, 'analytics_nodes', None)
- #end get_analytics_nodes
-
- def get_database_nodes(self):
- return getattr(self, 'database_nodes', None)
- #end get_database_nodes
-
- def get_service_appliance_sets(self):
- return getattr(self, 'service_appliance_sets', None)
- #end get_service_appliance_sets
-
- def set_bgp_router(self, ref_obj):
- """Set bgp-router for global-system-config.
-
- :param ref_obj: BgpRouter object
-
- """
- self.bgp_router_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.bgp_router_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_bgp_router
-
- def add_bgp_router(self, ref_obj):
- """Add bgp-router to global-system-config.
-
- :param ref_obj: BgpRouter object
-
- """
- refs = getattr(self, 'bgp_router_refs', [])
- if not refs:
- self.bgp_router_refs = []
-
- # if ref already exists, update its uuid in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.bgp_router_refs.append(ref_info)
- #end add_bgp_router
-
- def del_bgp_router(self, ref_obj):
- refs = self.get_bgp_router_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.bgp_router_refs.remove(ref)
- return
- #end del_bgp_router
-
- def set_bgp_router_list(self, ref_obj_list):
- """Set bgp-router list for global-system-config.
-
- :param ref_obj_list: list of BgpRouter object
-
- """
- self.bgp_router_refs = ref_obj_list
- #end set_bgp_router_list
-
- def get_bgp_router_refs(self):
- """Return bgp-router list for global-system-config.
-
- :returns: list of <BgpRouter>
-
- """
- return getattr(self, 'bgp_router_refs', None)
- #end get_bgp_router_refs
-
- def get_config_root_back_refs(self):
- """Return list of all config-roots using this global-system-config"""
- return getattr(self, 'config_root_back_refs', None)
- #end get_config_root_back_refs
-
- def dump(self):
- """Display global-system-config object in compact form."""
- print '------------ global-system-config ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P autonomous_system = ', self.get_autonomous_system()
- print 'P config_version = ', self.get_config_version()
- print 'P plugin_tuning = ', self.get_plugin_tuning()
- print 'P ibgp_auto_mesh = ', self.get_ibgp_auto_mesh()
- print 'P ip_fabric_subnets = ', self.get_ip_fabric_subnets()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF bgp_router = ', self.get_bgp_router_refs()
- print 'HAS global_vrouter_config = ', self.get_global_vrouter_configs()
- print 'HAS physical_router = ', self.get_physical_routers()
- print 'HAS virtual_router = ', self.get_virtual_routers()
- print 'HAS config_node = ', self.get_config_nodes()
- print 'HAS analytics_node = ', self.get_analytics_nodes()
- print 'HAS database_node = ', self.get_database_nodes()
- print 'HAS service_appliance_set = ', self.get_service_appliance_sets()
- #end dump
-
-#end class GlobalSystemConfig
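
A short sketch of how the global-system-config helpers above compose (illustrative only, not part of the removed file). _StubBgpRouter is a hypothetical stand-in for BgpRouter, and passing a plain integer in place of the AutonomousSystemType value is an assumption made for brevity.

class _StubBgpRouter(object):
    # stand-in: add_bgp_router() only reads get_fq_name() and uuid
    uuid = 'bgp-0001'
    def get_fq_name(self):
        return ['default-domain', 'default-project', 'ip-fabric', '__default__', 'bgp1']

gsc = GlobalSystemConfig()                 # name defaults to u'default-global-system-config'
gsc.set_autonomous_system(64512)           # plain int used here in place of AutonomousSystemType
gsc.add_bgp_router(_StubBgpRouter())

assert gsc.get_fq_name_str() == 'default-global-system-config'
assert gsc.get_parent_fq_name() is None    # child of config-root
assert gsc.get_bgp_router_refs()[0]['uuid'] == 'bgp-0001'

# field_names=None serializes every field that has been set
blob = gsc.serialize_to_json()
assert blob['autonomous_system'] == 64512
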
-
-class ServiceAppliance(object):
- """
- Represents service-appliance configuration.
-
- Child of:
- :class:`.ServiceApplianceSet` object OR
-
- Properties:
- * service-appliance-user-credentials (:class:`.UserCredentials` type)
- * service-appliance-ip-address (IpAddressType type)
- * service-appliance-properties (:class:`.KeyValuePairs` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
-
- Referred by:
- """
-
- prop_fields = set([u'service_appliance_user_credentials', u'service_appliance_ip_address', u'service_appliance_properties', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'service_appliance_set_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, service_appliance_user_credentials = None, service_appliance_ip_address = None, service_appliance_properties = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'service-appliance'
- if not name:
- name = u'default-service-appliance'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'service-appliance-set'
- self.fq_name = [u'default-global-system-config', u'default-service-appliance-set']
- self.fq_name.append(name)
-
-
- # property fields
- if service_appliance_user_credentials:
- self._service_appliance_user_credentials = service_appliance_user_credentials
- if service_appliance_ip_address:
- self._service_appliance_ip_address = service_appliance_ip_address
- if service_appliance_properties:
- self._service_appliance_properties = service_appliance_properties
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (service-appliance)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of service-appliance in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of service-appliance as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of service-appliance's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of service-appliance's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def service_appliance_user_credentials(self):
- """Get service-appliance-user-credentials for service-appliance.
-
- :returns: UserCredentials object
-
- """
- return getattr(self, '_service_appliance_user_credentials', None)
- #end service_appliance_user_credentials
-
- @service_appliance_user_credentials.setter
- def service_appliance_user_credentials(self, service_appliance_user_credentials):
- """Set service-appliance-user-credentials for service-appliance.
-
- :param service_appliance_user_credentials: UserCredentials object
-
- """
- self._service_appliance_user_credentials = service_appliance_user_credentials
- #end service_appliance_user_credentials
-
- def set_service_appliance_user_credentials(self, value):
- self.service_appliance_user_credentials = value
- #end set_service_appliance_user_credentials
-
- def get_service_appliance_user_credentials(self):
- return self.service_appliance_user_credentials
- #end get_service_appliance_user_credentials
-
- @property
- def service_appliance_ip_address(self):
- """Get service-appliance-ip-address for service-appliance.
-
- :returns: IpAddressType object
-
- """
- return getattr(self, '_service_appliance_ip_address', None)
- #end service_appliance_ip_address
-
- @service_appliance_ip_address.setter
- def service_appliance_ip_address(self, service_appliance_ip_address):
- """Set service-appliance-ip-address for service-appliance.
-
- :param service_appliance_ip_address: IpAddressType object
-
- """
- self._service_appliance_ip_address = service_appliance_ip_address
- #end service_appliance_ip_address
-
- def set_service_appliance_ip_address(self, value):
- self.service_appliance_ip_address = value
- #end set_service_appliance_ip_address
-
- def get_service_appliance_ip_address(self):
- return self.service_appliance_ip_address
- #end get_service_appliance_ip_address
-
- @property
- def service_appliance_properties(self):
- """Get service-appliance-properties for service-appliance.
-
- :returns: KeyValuePairs object
-
- """
- return getattr(self, '_service_appliance_properties', None)
- #end service_appliance_properties
-
- @service_appliance_properties.setter
- def service_appliance_properties(self, service_appliance_properties):
- """Set service-appliance-properties for service-appliance.
-
- :param service_appliance_properties: KeyValuePairs object
-
- """
- self._service_appliance_properties = service_appliance_properties
- #end service_appliance_properties
-
- def set_service_appliance_properties(self, value):
- self.service_appliance_properties = value
- #end set_service_appliance_properties
-
- def get_service_appliance_properties(self):
- return self.service_appliance_properties
- #end get_service_appliance_properties
-
- @property
- def id_perms(self):
- """Get id-perms for service-appliance.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for service-appliance.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for service-appliance.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for service-appliance.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_service_appliance_user_credentials'):
- self._serialize_field_to_json(serialized, field_names, 'service_appliance_user_credentials')
- if hasattr(self, '_service_appliance_ip_address'):
- self._serialize_field_to_json(serialized, field_names, 'service_appliance_ip_address')
- if hasattr(self, '_service_appliance_properties'):
- self._serialize_field_to_json(serialized, field_names, 'service_appliance_properties')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_service_appliance_set_back_refs(self):
- """Return list of all service-appliance-sets using this service-appliance"""
- return getattr(self, 'service_appliance_set_back_refs', None)
- #end get_service_appliance_set_back_refs
-
- def dump(self):
- """Display service-appliance object in compact form."""
- print '------------ service-appliance ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P service_appliance_user_credentials = ', self.get_service_appliance_user_credentials()
- print 'P service_appliance_ip_address = ', self.get_service_appliance_ip_address()
- print 'P service_appliance_properties = ', self.get_service_appliance_properties()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- #end dump
-
-#end class ServiceAppliance
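
A brief sketch of the default-parent behaviour of the constructor above (illustrative only, not part of the removed file): with no parent supplied, the object is filed under the default service-appliance-set. The plain dict passed to set_service_appliance_properties() is an assumed stand-in for the generated KeyValuePairs type.

sa = ServiceAppliance('sa-1')
assert sa.get_fq_name() == [u'default-global-system-config',
                            u'default-service-appliance-set', 'sa-1']
assert sa.get_parent_fq_name_str() == 'default-global-system-config:default-service-appliance-set'
assert sa.parent_name == u'default-service-appliance-set'

# a plain dict stands in for the KeyValuePairs type here
sa.set_service_appliance_properties({'key_value_pair': [{'key': 'mode', 'value': 'active'}]})
assert 'service_appliance_properties' in sa.serialize_to_json()
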
-
-class ServiceInstance(object):
- """
- Represents service-instance configuration.
-
- Child of:
- :class:`.Project` object OR
-
- Properties:
- * service-instance-properties (:class:`.ServiceInstanceType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
- * list of :class:`.ServiceTemplate` objects
-
- Referred by:
- * list of :class:`.VirtualMachine` objects
- * list of :class:`.LogicalRouter` objects
- * list of :class:`.LoadbalancerPool` objects
- """
-
- prop_fields = set([u'service_instance_properties', u'id_perms', u'display_name'])
- ref_fields = set(['service_template_refs'])
- backref_fields = set([u'project_back_refs', u'virtual_machine_back_refs', u'logical_router_back_refs', u'loadbalancer_pool_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, service_instance_properties = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'service-instance'
- if not name:
- name = u'default-service-instance'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'project'
- self.fq_name = [u'default-domain', u'default-project']
- self.fq_name.append(name)
-
-
- # property fields
- if service_instance_properties:
- self._service_instance_properties = service_instance_properties
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (service-instance)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of service-instance in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of service-instance as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of service-instance's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of service-instance's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def service_instance_properties(self):
- """Get service-instance-properties for service-instance.
-
- :returns: ServiceInstanceType object
-
- """
- return getattr(self, '_service_instance_properties', None)
- #end service_instance_properties
-
- @service_instance_properties.setter
- def service_instance_properties(self, service_instance_properties):
- """Set service-instance-properties for service-instance.
-
- :param service_instance_properties: ServiceInstanceType object
-
- """
- self._service_instance_properties = service_instance_properties
- #end service_instance_properties
-
- def set_service_instance_properties(self, value):
- self.service_instance_properties = value
- #end set_service_instance_properties
-
- def get_service_instance_properties(self):
- return self.service_instance_properties
- #end get_service_instance_properties
-
- @property
- def id_perms(self):
- """Get id-perms for service-instance.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for service-instance.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for service-instance.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for service-instance.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_service_instance_properties'):
- self._serialize_field_to_json(serialized, field_names, 'service_instance_properties')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'service_template_refs'):
- self._serialize_field_to_json(serialized, field_names, 'service_template_refs')
- return serialized
- #end serialize_to_json
-
- def set_service_template(self, ref_obj):
- """Set service-template for service-instance.
-
- :param ref_obj: ServiceTemplate object
-
- """
- self.service_template_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.service_template_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_service_template
-
- def add_service_template(self, ref_obj):
- """Add service-template to service-instance.
-
- :param ref_obj: ServiceTemplate object
-
- """
- refs = getattr(self, 'service_template_refs', [])
- if not refs:
- self.service_template_refs = []
-
- # if ref already exists, update its uuid in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.service_template_refs.append(ref_info)
- #end add_service_template
-
- def del_service_template(self, ref_obj):
- refs = self.get_service_template_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.service_template_refs.remove(ref)
- return
- #end del_service_template
-
- def set_service_template_list(self, ref_obj_list):
- """Set service-template list for service-instance.
-
- :param ref_obj_list: list of ServiceTemplate object
-
- """
- self.service_template_refs = ref_obj_list
- #end set_service_template_list
-
- def get_service_template_refs(self):
- """Return service-template list for service-instance.
-
- :returns: list of <ServiceTemplate>
-
- """
- return getattr(self, 'service_template_refs', None)
- #end get_service_template_refs
-
- def get_project_back_refs(self):
- """Return list of all projects using this service-instance"""
- return getattr(self, 'project_back_refs', None)
- #end get_project_back_refs
-
- def get_virtual_machine_back_refs(self):
- """Return list of all virtual-machines using this service-instance"""
- return getattr(self, 'virtual_machine_back_refs', None)
- #end get_virtual_machine_back_refs
-
- def get_logical_router_back_refs(self):
- """Return list of all logical-routers using this service-instance"""
- return getattr(self, 'logical_router_back_refs', None)
- #end get_logical_router_back_refs
-
- def get_loadbalancer_pool_back_refs(self):
- """Return list of all loadbalancer-pools using this service-instance"""
- return getattr(self, 'loadbalancer_pool_back_refs', None)
- #end get_loadbalancer_pool_back_refs
-
- def dump(self):
- """Display service-instance object in compact form."""
- print '------------ service-instance ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P service_instance_properties = ', self.get_service_instance_properties()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF service_template = ', self.get_service_template_refs()
- print 'BCK virtual_machine = ', self.get_virtual_machine_back_refs()
- print 'BCK logical_router = ', self.get_logical_router_back_refs()
- print 'BCK loadbalancer_pool = ', self.get_loadbalancer_pool_back_refs()
- #end dump
-
-#end class ServiceInstance
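
A small sketch of the two fq_name paths in the constructor above (illustrative only, not part of the removed file): the name is either appended to an explicit parent's fq_name or to the default project. _StubProject is a hypothetical stand-in exposing only the two attributes __init__ reads, not a real Project instance.

class _StubProject(object):
    _type = 'project'
    fq_name = [u'default-domain', u'demo-project']

si_default = ServiceInstance('si-1')               # no parent: default project is assumed
assert si_default.get_fq_name() == [u'default-domain', u'default-project', 'si-1']

si_scoped = ServiceInstance('si-2', parent_obj=_StubProject())
assert si_scoped.parent_type == 'project'
assert si_scoped.get_fq_name_str() == 'default-domain:demo-project:si-2'
assert si_scoped.parent_name == u'demo-project'
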
-
-class Namespace(object):
- """
- Represents namespace configuration.
-
- Child of:
- :class:`.Domain` object OR
-
- Properties:
- * namespace-cidr (:class:`.SubnetType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
-
- Referred by:
- * list of :class:`.Project` objects
- """
-
- prop_fields = set([u'namespace_cidr', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'domain_back_refs', u'project_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, namespace_cidr = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'namespace'
- if not name:
- name = u'default-namespace'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'domain'
- self.fq_name = [u'default-domain']
- self.fq_name.append(name)
-
-
- # property fields
- if namespace_cidr:
- self._namespace_cidr = namespace_cidr
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (namespace)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of namespace in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of namespace as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of namespace's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of namespace's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def namespace_cidr(self):
- """Get namespace-cidr for namespace.
-
- :returns: SubnetType object
-
- """
- return getattr(self, '_namespace_cidr', None)
- #end namespace_cidr
-
- @namespace_cidr.setter
- def namespace_cidr(self, namespace_cidr):
- """Set namespace-cidr for namespace.
-
- :param namespace_cidr: SubnetType object
-
- """
- self._namespace_cidr = namespace_cidr
- #end namespace_cidr
-
- def set_namespace_cidr(self, value):
- self.namespace_cidr = value
- #end set_namespace_cidr
-
- def get_namespace_cidr(self):
- return self.namespace_cidr
- #end get_namespace_cidr
-
- @property
- def id_perms(self):
- """Get id-perms for namespace.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for namespace.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for namespace.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for namespace.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_namespace_cidr'):
- self._serialize_field_to_json(serialized, field_names, 'namespace_cidr')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_domain_back_refs(self):
- """Return list of all domains using this namespace"""
- return getattr(self, 'domain_back_refs', None)
- #end get_domain_back_refs
-
- def get_project_back_refs(self):
- """Return list of all projects using this namespace"""
- return getattr(self, 'project_back_refs', None)
- #end get_project_back_refs
-
- def dump(self):
- """Display namespace object in compact form."""
- print '------------ namespace ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P namespace_cidr = ', self.get_namespace_cidr()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'BCK project = ', self.get_project_back_refs()
- #end dump
-
-#end class Namespace
-
-class LogicalInterface(object):
- """
- Represents logical-interface configuration.
-
- Child of:
- :class:`.PhysicalRouter` object OR
- :class:`.PhysicalInterface` object OR
-
- Properties:
- * logical-interface-vlan-tag (xsd:integer type)
- * logical-interface-type (LogicalInterfaceType type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
- * list of :class:`.VirtualMachineInterface` objects
-
- Referred by:
- """
-
- prop_fields = set([u'logical_interface_vlan_tag', u'logical_interface_type', u'id_perms', u'display_name'])
- ref_fields = set(['virtual_machine_interface_refs'])
- backref_fields = set([u'physical_router_back_refs', u'physical_interface_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, logical_interface_vlan_tag = None, logical_interface_type = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'logical-interface'
- if not name:
- name = u'default-logical-interface'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- # if obj constructed from within server, ignore if parent not specified
- if not kwargs_parent_type:
- raise AmbiguousParentError("[[u'default-global-system-config', u'default-physical-router'], [u'default-global-system-config', u'default-physical-router', u'default-physical-interface']]")
-
- # property fields
- if logical_interface_vlan_tag:
- self._logical_interface_vlan_tag = logical_interface_vlan_tag
- if logical_interface_type:
- self._logical_interface_type = logical_interface_type
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (logical-interface)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of logical-interface in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of logical-interface as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of logical-interface's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of logical-interface's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def logical_interface_vlan_tag(self):
- """Get logical-interface-vlan-tag for logical-interface.
-
- :returns: xsd:integer object
-
- """
- return getattr(self, '_logical_interface_vlan_tag', None)
- #end logical_interface_vlan_tag
-
- @logical_interface_vlan_tag.setter
- def logical_interface_vlan_tag(self, logical_interface_vlan_tag):
- """Set logical-interface-vlan-tag for logical-interface.
-
- :param logical_interface_vlan_tag: xsd:integer object
-
- """
- self._logical_interface_vlan_tag = logical_interface_vlan_tag
- #end logical_interface_vlan_tag
-
- def set_logical_interface_vlan_tag(self, value):
- self.logical_interface_vlan_tag = value
- #end set_logical_interface_vlan_tag
-
- def get_logical_interface_vlan_tag(self):
- return self.logical_interface_vlan_tag
- #end get_logical_interface_vlan_tag
-
- @property
- def logical_interface_type(self):
- """Get logical-interface-type for logical-interface.
-
- :returns: LogicalInterfaceType object
-
- """
- return getattr(self, '_logical_interface_type', None)
- #end logical_interface_type
-
- @logical_interface_type.setter
- def logical_interface_type(self, logical_interface_type):
- """Set logical-interface-type for logical-interface.
-
- :param logical_interface_type: LogicalInterfaceType object
-
- """
- self._logical_interface_type = logical_interface_type
- #end logical_interface_type
-
- def set_logical_interface_type(self, value):
- self.logical_interface_type = value
- #end set_logical_interface_type
-
- def get_logical_interface_type(self):
- return self.logical_interface_type
- #end get_logical_interface_type
-
- @property
- def id_perms(self):
- """Get id-perms for logical-interface.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for logical-interface.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for logical-interface.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for logical-interface.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_logical_interface_vlan_tag'):
- self._serialize_field_to_json(serialized, field_names, 'logical_interface_vlan_tag')
- if hasattr(self, '_logical_interface_type'):
- self._serialize_field_to_json(serialized, field_names, 'logical_interface_type')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'virtual_machine_interface_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_interface_refs')
- return serialized
- #end serialize_to_json
-
- def set_virtual_machine_interface(self, ref_obj):
- """Set virtual-machine-interface for logical-interface.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_machine_interface_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, ref_obj):
- """Add virtual-machine-interface to logical-interface.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- refs = getattr(self, 'virtual_machine_interface_refs', [])
- if not refs:
- self.virtual_machine_interface_refs = []
-
- # if ref already exists, update its uuid in place
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_machine_interface_refs.append(ref_info)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, ref_obj):
- refs = self.get_virtual_machine_interface_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_machine_interface_refs.remove(ref)
- return
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, ref_obj_list):
- """Set virtual-machine-interface list for logical-interface.
-
- :param ref_obj_list: list of VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = ref_obj_list
- #end set_virtual_machine_interface_list
-
- def get_virtual_machine_interface_refs(self):
- """Return virtual-machine-interface list for logical-interface.
-
- :returns: list of <VirtualMachineInterface>
-
- """
- return getattr(self, 'virtual_machine_interface_refs', None)
- #end get_virtual_machine_interface_refs
-
- def get_physical_router_back_refs(self):
- """Return list of all physical-routers using this logical-interface"""
- return getattr(self, 'physical_router_back_refs', None)
- #end get_physical_router_back_refs
-
- def get_physical_interface_back_refs(self):
- """Return list of all physical-interfaces using this logical-interface"""
- return getattr(self, 'physical_interface_back_refs', None)
- #end get_physical_interface_back_refs
-
- def dump(self):
- """Display logical-interface object in compact form."""
- print '------------ logical-interface ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P logical_interface_vlan_tag = ', self.get_logical_interface_vlan_tag()
- print 'P logical_interface_type = ', self.get_logical_interface_type()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF virtual_machine_interface = ', self.get_virtual_machine_interface_refs()
- #end dump
-
-#end class LogicalInterface
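
A sketch of the ambiguous-parent handling in the constructor above (illustrative only, not part of the removed file): logical-interface can sit under either a physical-router or a physical-interface, so the constructor refuses to guess. _StubPhysicalInterface is a hypothetical stand-in parent, and AmbiguousParentError is assumed to be the exception class imported at the top of this module.

class _StubPhysicalInterface(object):
    _type = 'physical-interface'
    fq_name = [u'default-global-system-config', u'qfx-1', u'ge-0/0/0']

li = LogicalInterface('ge-0/0/0.100',
                      parent_obj=_StubPhysicalInterface(),
                      logical_interface_vlan_tag=100)
assert li.get_logical_interface_vlan_tag() == 100
assert li.get_parent_fq_name_str() == 'default-global-system-config:qfx-1:ge-0/0/0'

try:
    LogicalInterface('orphan-if')       # no parent object and no parent kwargs
except AmbiguousParentError:
    pass                                # both candidate parents are listed in the error message
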
-
-class RouteTable(object):
- """
- Represents route-table configuration.
-
- Child of:
- :class:`.Project` object OR
-
- Properties:
- * routes (:class:`.RouteTableType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
-
- Referred by:
- * list of :class:`.VirtualNetwork` objects
- """
-
- prop_fields = set([u'routes', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'project_back_refs', u'virtual_network_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, routes = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'route-table'
- if not name:
- name = u'default-route-table'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'project'
- self.fq_name = [u'default-domain', u'default-project']
- self.fq_name.append(name)
-
-
- # property fields
- if routes:
- self._routes = routes
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (route-table)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of route-table in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of route-table as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of route-table's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of route-table's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def routes(self):
- """Get routes for route-table.
-
- :returns: RouteTableType object
-
- """
- return getattr(self, '_routes', None)
- #end routes
-
- @routes.setter
- def routes(self, routes):
- """Set routes for route-table.
-
- :param routes: RouteTableType object
-
- """
- self._routes = routes
- #end routes
-
- def set_routes(self, value):
- self.routes = value
- #end set_routes
-
- def get_routes(self):
- return self.routes
- #end get_routes
-
- @property
- def id_perms(self):
- """Get id-perms for route-table.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for route-table.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for route-table.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for route-table.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_routes'):
- self._serialize_field_to_json(serialized, field_names, 'routes')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_project_back_refs(self):
- """Return list of all projects using this route-table"""
- return getattr(self, 'project_back_refs', None)
- #end get_project_back_refs
-
- def get_virtual_network_back_refs(self):
- """Return list of all virtual-networks using this route-table"""
- return getattr(self, 'virtual_network_back_refs', None)
- #end get_virtual_network_back_refs
-
- def dump(self):
- """Display route-table object in compact form."""
- print '------------ route-table ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P routes = ', self.get_routes()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'BCK virtual_network = ', self.get_virtual_network_back_refs()
- #end dump
-
-#end class RouteTable
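# Editor's illustrative sketch (not part of the removed test files): assuming
# RouteTable's constructor follows the same pattern as the other generated
# classes in this file (name plus optional parent_obj or parent_type/fq_name
# keywords), a table can be built and inspected with the accessors shown above.
# The names below are hypothetical.
rt = RouteTable(name='demo-rt', parent_type='project',
                fq_name=[u'default-domain', u'default-project', u'demo-rt'])
rt.set_uuid('0f0f0f0f-0000-0000-0000-000000000000')
rt.get_fq_name_str()        # 'default-domain:default-project:demo-rt'
rt.get_parent_fq_name()     # [u'default-domain', u'default-project']
rt.serialize_to_json()      # only fields that were actually set are emitted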
-
-class PhysicalInterface(object):
- """
- Represents a physical-interface configuration object.
-
- Child of:
- :class:`.PhysicalRouter` object
-
- Properties:
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
- * list of :class:`.LogicalInterface` objects
-
- References to:
-
- Referred by:
- """
-
- prop_fields = set([u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'physical_router_back_refs'])
- children_fields = set([u'logical_interfaces'])
-
- def __init__(self, name = None, parent_obj = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'physical-interface'
- if not name:
- name = u'default-physical-interface'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'physical-router'
- self.fq_name = [u'default-global-system-config', u'default-physical-router']
- self.fq_name.append(name)
-
-
- # property fields
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (physical-interface)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of physical-interface in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of physical-interface as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of physical-interface's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of physical-interface's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def id_perms(self):
- """Get id-perms for physical-interface.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for physical-interface.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for physical-interface.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for physical-interface.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_logical_interfaces(self):
- return getattr(self, 'logical_interfaces', None)
- #end get_logical_interfaces
-
- def get_physical_router_back_refs(self):
- """Return list of all physical-routers using this physical-interface"""
- return getattr(self, 'physical_router_back_refs', None)
- #end get_physical_router_back_refs
-
- def dump(self):
- """Display physical-interface object in compact form."""
- print '------------ physical-interface ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'HAS logical_interface = ', self.get_logical_interfaces()
- #end dump
-
-#end class PhysicalInterface
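# Editor's illustrative sketch (not part of the removed test files): with
# PhysicalInterface as defined above, omitting parent_obj falls back to the
# default physical-router parent, so the FQ name is predictable. The interface
# and display names are hypothetical.
pi = PhysicalInterface(name='ge-0/0/1')
pi.get_fq_name()      # [u'default-global-system-config', u'default-physical-router', 'ge-0/0/1']
pi.set_display_name('uplink to spine')
pi.serialize_to_json(['fq_name', 'display_name'])   # uuid is always included as a common field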
-
-class AccessControlList(object):
- """
- Represents an access-control-list configuration object.
-
- Child of:
- :class:`.VirtualNetwork` object OR
- :class:`.SecurityGroup` object
-
- Properties:
- * access-control-list-entries (:class:`.AclEntriesType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
-
- Referred by:
- """
-
- prop_fields = set([u'access_control_list_entries', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'virtual_network_back_refs', u'security_group_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, access_control_list_entries = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'access-control-list'
- if not name:
- name = u'default-access-control-list'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- # if obj constructed from within server, ignore if parent not specified
- if not kwargs_parent_type:
- raise AmbiguousParentError("[[u'default-domain', u'default-project', u'default-virtual-network'], [u'default-domain', u'default-project', u'default-security-group']]")
-
- # property fields
- if access_control_list_entries:
- self._access_control_list_entries = access_control_list_entries
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (access-control-list)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of access-control-list in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of access-control-list as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of access-control-list's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of access-control-list's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def access_control_list_entries(self):
- """Get access-control-list-entries for access-control-list.
-
- :returns: AclEntriesType object
-
- """
- return getattr(self, '_access_control_list_entries', None)
- #end access_control_list_entries
-
- @access_control_list_entries.setter
- def access_control_list_entries(self, access_control_list_entries):
- """Set access-control-list-entries for access-control-list.
-
- :param access_control_list_entries: AclEntriesType object
-
- """
- self._access_control_list_entries = access_control_list_entries
- #end access_control_list_entries
-
- def set_access_control_list_entries(self, value):
- self.access_control_list_entries = value
- #end set_access_control_list_entries
-
- def get_access_control_list_entries(self):
- return self.access_control_list_entries
- #end get_access_control_list_entries
-
- @property
- def id_perms(self):
- """Get id-perms for access-control-list.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for access-control-list.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for access-control-list.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for access-control-list.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_access_control_list_entries'):
- self._serialize_field_to_json(serialized, field_names, 'access_control_list_entries')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_virtual_network_back_refs(self):
- """Return list of all virtual-networks using this access-control-list"""
- return getattr(self, 'virtual_network_back_refs', None)
- #end get_virtual_network_back_refs
-
- def get_security_group_back_refs(self):
- """Return list of all security-groups using this access-control-list"""
- return getattr(self, 'security_group_back_refs', None)
- #end get_security_group_back_refs
-
- def dump(self):
- """Display access-control-list object in compact form."""
- print '------------ access-control-list ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P access_control_list_entries = ', self.get_access_control_list_entries()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- #end dump
-
-#end class AccessControlList
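# Editor's illustrative sketch (not part of the removed test files):
# AccessControlList has two possible parents, so constructing it without a
# parent_obj (or explicit parent_type/fq_name keywords) raises
# AmbiguousParentError. Assuming the VirtualNetwork class from the same
# generated module is available, with its usual default-project parent:
vn = VirtualNetwork(name='demo-vn')            # hypothetical parent object
acl = AccessControlList(name='demo-acl', parent_obj=vn)
acl.get_parent_fq_name_str()   # 'default-domain:default-project:demo-vn'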
-
-class AnalyticsNode(object):
- """
- Represents an analytics-node configuration object.
-
- Child of:
- :class:`.GlobalSystemConfig` object
-
- Properties:
- * analytics-node-ip-address (IpAddressType type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
-
- Referred by:
- """
-
- prop_fields = set([u'analytics_node_ip_address', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'global_system_config_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, analytics_node_ip_address = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'analytics-node'
- if not name:
- name = u'default-analytics-node'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'global-system-config'
- self.fq_name = [u'default-global-system-config']
- self.fq_name.append(name)
-
-
- # property fields
- if analytics_node_ip_address:
- self._analytics_node_ip_address = analytics_node_ip_address
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (analytics-node)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of analytics-node in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of analytics-node as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of analytics-node's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of analytics-node's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def analytics_node_ip_address(self):
- """Get analytics-node-ip-address for analytics-node.
-
- :returns: IpAddressType object
-
- """
- return getattr(self, '_analytics_node_ip_address', None)
- #end analytics_node_ip_address
-
- @analytics_node_ip_address.setter
- def analytics_node_ip_address(self, analytics_node_ip_address):
- """Set analytics-node-ip-address for analytics-node.
-
- :param analytics_node_ip_address: IpAddressType object
-
- """
- self._analytics_node_ip_address = analytics_node_ip_address
- #end analytics_node_ip_address
-
- def set_analytics_node_ip_address(self, value):
- self.analytics_node_ip_address = value
- #end set_analytics_node_ip_address
-
- def get_analytics_node_ip_address(self):
- return self.analytics_node_ip_address
- #end get_analytics_node_ip_address
-
- @property
- def id_perms(self):
- """Get id-perms for analytics-node.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for analytics-node.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for analytics-node.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for analytics-node.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_analytics_node_ip_address'):
- self._serialize_field_to_json(serialized, field_names, 'analytics_node_ip_address')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_global_system_config_back_refs(self):
- """Return list of all global-system-configs using this analytics-node"""
- return getattr(self, 'global_system_config_back_refs', None)
- #end get_global_system_config_back_refs
-
- def dump(self):
- """Display analytics-node object in compact form."""
- print '------------ analytics-node ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P analytics_node_ip_address = ', self.get_analytics_node_ip_address()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- #end dump
-
-#end class AnalyticsNode
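# Editor's illustrative sketch (not part of the removed test files):
# AnalyticsNode defaults to the default-global-system-config parent when no
# parent_obj is given. The node name and address are hypothetical.
an = AnalyticsNode(name='analytics-1', analytics_node_ip_address='10.0.0.5')
an.get_fq_name_str()    # 'default-global-system-config:analytics-1'
an.dump()               # prints the compact form produced by dump() above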
-
-class VirtualDns(object):
- """
- Represents a virtual-DNS configuration object.
-
- Child of:
- :class:`.Domain` object
-
- Properties:
- * virtual-DNS-data (:class:`.VirtualDnsType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
- * list of :class:`.VirtualDnsRecord` objects
-
- References to:
-
- Referred by:
- * list of :class:`.NetworkIpam` objects
- """
-
- prop_fields = set([u'virtual_DNS_data', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'domain_back_refs', u'network_ipam_back_refs'])
- children_fields = set([u'virtual_DNS_records'])
-
- def __init__(self, name = None, parent_obj = None, virtual_DNS_data = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'virtual-DNS'
- if not name:
- name = u'default-virtual-DNS'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'domain'
- self.fq_name = [u'default-domain']
- self.fq_name.append(name)
-
-
- # property fields
- if virtual_DNS_data:
- self._virtual_DNS_data = virtual_DNS_data
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (virtual-DNS)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of virtual-DNS in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of virtual-DNS as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of virtual-DNS's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of virtual-DNS's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def virtual_DNS_data(self):
- """Get virtual-DNS-data for virtual-DNS.
-
- :returns: VirtualDnsType object
-
- """
- return getattr(self, '_virtual_DNS_data', None)
- #end virtual_DNS_data
-
- @virtual_DNS_data.setter
- def virtual_DNS_data(self, virtual_DNS_data):
- """Set virtual-DNS-data for virtual-DNS.
-
- :param virtual_DNS_data: VirtualDnsType object
-
- """
- self._virtual_DNS_data = virtual_DNS_data
- #end virtual_DNS_data
-
- def set_virtual_DNS_data(self, value):
- self.virtual_DNS_data = value
- #end set_virtual_DNS_data
-
- def get_virtual_DNS_data(self):
- return self.virtual_DNS_data
- #end get_virtual_DNS_data
-
- @property
- def id_perms(self):
- """Get id-perms for virtual-DNS.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for virtual-DNS.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for virtual-DNS.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for virtual-DNS.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_virtual_DNS_data'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_DNS_data')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_virtual_DNS_records(self):
- return getattr(self, 'virtual_DNS_records', None)
- #end get_virtual_DNS_records
-
- def get_domain_back_refs(self):
- """Return list of all domains using this virtual-DNS"""
- return getattr(self, 'domain_back_refs', None)
- #end get_domain_back_refs
-
- def get_network_ipam_back_refs(self):
- """Return list of all network-ipams using this virtual-DNS"""
- return getattr(self, 'network_ipam_back_refs', None)
- #end get_network_ipam_back_refs
-
- def dump(self):
- """Display virtual-DNS object in compact form."""
- print '------------ virtual-DNS ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P virtual_DNS_data = ', self.get_virtual_DNS_data()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'HAS virtual_DNS_record = ', self.get_virtual_DNS_records()
- print 'BCK network_ipam = ', self.get_network_ipam_back_refs()
- #end dump
-
-#end class VirtualDns
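# Editor's illustrative sketch (not part of the removed test files): VirtualDns
# is parented under a domain and keeps its settings in a VirtualDnsType
# property object. dns_data here stands for a VirtualDnsType instance built
# from the generated types module that accompanies this file.
vdns = VirtualDns(name='corp-dns')
vdns.get_fq_name()                 # [u'default-domain', 'corp-dns']
vdns.set_virtual_DNS_data(dns_data)
vdns.get_network_ipam_back_refs()  # None until the API server fills in back-refs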
-
-class CustomerAttachment(object):
- """
- Represents a customer-attachment configuration object.
-
- Properties:
- * attachment-address (:class:`.AttachmentAddressType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
- * list of :class:`.VirtualMachineInterface` objects
- * list of :class:`.FloatingIp` objects
-
- Referred by:
- """
-
- prop_fields = set([u'attachment_address', u'id_perms', u'display_name'])
- ref_fields = set(['virtual_machine_interface_refs', u'floating_ip_refs'])
- backref_fields = set([])
- children_fields = set([])
-
- def __init__(self, name = None, attachment_address = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'customer-attachment'
- if not name:
- name = u'default-customer-attachment'
- self.name = name
- self._uuid = None
- self.fq_name = [name]
-
- # property fields
- if attachment_address:
- self._attachment_address = attachment_address
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (customer-attachment)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of customer-attachment in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of customer-attachment as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def attachment_address(self):
- """Get attachment-address for customer-attachment.
-
- :returns: AttachmentAddressType object
-
- """
- return getattr(self, '_attachment_address', None)
- #end attachment_address
-
- @attachment_address.setter
- def attachment_address(self, attachment_address):
- """Set attachment-address for customer-attachment.
-
- :param attachment_address: AttachmentAddressType object
-
- """
- self._attachment_address = attachment_address
- #end attachment_address
-
- def set_attachment_address(self, value):
- self.attachment_address = value
- #end set_attachment_address
-
- def get_attachment_address(self):
- return self.attachment_address
- #end get_attachment_address
-
- @property
- def id_perms(self):
- """Get id-perms for customer-attachment.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for customer-attachment.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for customer-attachment.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for customer-attachment.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_attachment_address'):
- self._serialize_field_to_json(serialized, field_names, 'attachment_address')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'virtual_machine_interface_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_interface_refs')
- if hasattr(self, 'floating_ip_refs'):
- self._serialize_field_to_json(serialized, field_names, 'floating_ip_refs')
- return serialized
- #end serialize_to_json
-
- def set_virtual_machine_interface(self, ref_obj):
- """Set virtual-machine-interface for customer-attachment.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_machine_interface_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, ref_obj):
- """Add virtual-machine-interface to customer-attachment.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- refs = getattr(self, 'virtual_machine_interface_refs', [])
- if not refs:
- self.virtual_machine_interface_refs = []
-
- # if ref already exists, update any attr with it
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_machine_interface_refs.append(ref_info)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, ref_obj):
- refs = self.get_virtual_machine_interface_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_machine_interface_refs.remove(ref)
- return
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, ref_obj_list):
- """Set virtual-machine-interface list for customer-attachment.
-
- :param ref_obj_list: list of VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = ref_obj_list
- #end set_virtual_machine_interface_list
-
- def get_virtual_machine_interface_refs(self):
- """Return virtual-machine-interface list for customer-attachment.
-
- :returns: list of <VirtualMachineInterface>
-
- """
- return getattr(self, 'virtual_machine_interface_refs', None)
- #end get_virtual_machine_interface_refs
-
- def set_floating_ip(self, ref_obj):
- """Set floating-ip for customer-attachment.
-
- :param ref_obj: FloatingIp object
-
- """
- self.floating_ip_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.floating_ip_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_floating_ip
-
- def add_floating_ip(self, ref_obj):
- """Add floating-ip to customer-attachment.
-
- :param ref_obj: FloatingIp object
-
- """
- refs = getattr(self, 'floating_ip_refs', [])
- if not refs:
- self.floating_ip_refs = []
-
- # if ref already exists, update any attr with it
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.floating_ip_refs.append(ref_info)
- #end add_floating_ip
-
- def del_floating_ip(self, ref_obj):
- refs = self.get_floating_ip_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.floating_ip_refs.remove(ref)
- return
- #end del_floating_ip
-
- def set_floating_ip_list(self, ref_obj_list):
- """Set floating-ip list for customer-attachment.
-
- :param ref_obj_list: list of FloatingIp object
-
- """
- self.floating_ip_refs = ref_obj_list
- #end set_floating_ip_list
-
- def get_floating_ip_refs(self):
- """Return floating-ip list for customer-attachment.
-
- :returns: list of <FloatingIp>
-
- """
- return getattr(self, 'floating_ip_refs', None)
- #end get_floating_ip_refs
-
- def dump(self):
- """Display customer-attachment object in compact form."""
- print '------------ customer-attachment ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- print 'P attachment_address = ', self.get_attachment_address()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF virtual_machine_interface = ', self.get_virtual_machine_interface_refs()
- print 'REF floating_ip = ', self.get_floating_ip_refs()
- #end dump
-
-#end class CustomerAttachment
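# Editor's illustrative sketch (not part of the removed test files):
# CustomerAttachment stores references as plain {'to': fq_name, 'uuid': ...}
# dicts, and add_* de-duplicates on the referenced FQ name. FloatingIp is
# assumed to be available from the same generated module.
ca = CustomerAttachment(name='cust-a')
fip = FloatingIp(name='fip-1')
ca.add_floating_ip(fip)
ca.add_floating_ip(fip)             # second add does not append a duplicate ref
len(ca.get_floating_ip_refs())      # 1
ca.del_floating_ip(fip)
ca.get_floating_ip_refs()           # []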
-
-class ServiceApplianceSet(object):
- """
- Represents a service-appliance-set configuration object.
-
- Child of:
- :class:`.GlobalSystemConfig` object
-
- Properties:
- * service-appliance-set-properties (:class:`.KeyValuePairs` type)
- * service-appliance-driver (xsd:string type)
- * service-appliance-ha-mode (xsd:string type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
- * list of :class:`.ServiceAppliance` objects
-
- References to:
-
- Referred by:
- * list of :class:`.LoadbalancerPool` objects
- """
-
- prop_fields = set([u'service_appliance_set_properties', u'service_appliance_driver', u'service_appliance_ha_mode', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'global_system_config_back_refs', u'loadbalancer_pool_back_refs'])
- children_fields = set([u'service_appliances'])
-
- def __init__(self, name = None, parent_obj = None, service_appliance_set_properties = None, service_appliance_driver = None, service_appliance_ha_mode = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'service-appliance-set'
- if not name:
- name = u'default-service-appliance-set'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'global-system-config'
- self.fq_name = [u'default-global-system-config']
- self.fq_name.append(name)
-
-
- # property fields
- if service_appliance_set_properties:
- self._service_appliance_set_properties = service_appliance_set_properties
- if service_appliance_driver:
- self._service_appliance_driver = service_appliance_driver
- if service_appliance_ha_mode:
- self._service_appliance_ha_mode = service_appliance_ha_mode
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (service-appliance-set)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of service-appliance-set in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of service-appliance-set as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of service-appliance-set's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of service-appliance-set's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def service_appliance_set_properties(self):
- """Get service-appliance-set-properties for service-appliance-set.
-
- :returns: KeyValuePairs object
-
- """
- return getattr(self, '_service_appliance_set_properties', None)
- #end service_appliance_set_properties
-
- @service_appliance_set_properties.setter
- def service_appliance_set_properties(self, service_appliance_set_properties):
- """Set service-appliance-set-properties for service-appliance-set.
-
- :param service_appliance_set_properties: KeyValuePairs object
-
- """
- self._service_appliance_set_properties = service_appliance_set_properties
- #end service_appliance_set_properties
-
- def set_service_appliance_set_properties(self, value):
- self.service_appliance_set_properties = value
- #end set_service_appliance_set_properties
-
- def get_service_appliance_set_properties(self):
- return self.service_appliance_set_properties
- #end get_service_appliance_set_properties
-
- @property
- def service_appliance_driver(self):
- """Get service-appliance-driver for service-appliance-set.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_service_appliance_driver', None)
- #end service_appliance_driver
-
- @service_appliance_driver.setter
- def service_appliance_driver(self, service_appliance_driver):
- """Set service-appliance-driver for service-appliance-set.
-
- :param service_appliance_driver: xsd:string object
-
- """
- self._service_appliance_driver = service_appliance_driver
- #end service_appliance_driver
-
- def set_service_appliance_driver(self, value):
- self.service_appliance_driver = value
- #end set_service_appliance_driver
-
- def get_service_appliance_driver(self):
- return self.service_appliance_driver
- #end get_service_appliance_driver
-
- @property
- def service_appliance_ha_mode(self):
- """Get service-appliance-ha-mode for service-appliance-set.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_service_appliance_ha_mode', None)
- #end service_appliance_ha_mode
-
- @service_appliance_ha_mode.setter
- def service_appliance_ha_mode(self, service_appliance_ha_mode):
- """Set service-appliance-ha-mode for service-appliance-set.
-
- :param service_appliance_ha_mode: xsd:string object
-
- """
- self._service_appliance_ha_mode = service_appliance_ha_mode
- #end service_appliance_ha_mode
-
- def set_service_appliance_ha_mode(self, value):
- self.service_appliance_ha_mode = value
- #end set_service_appliance_ha_mode
-
- def get_service_appliance_ha_mode(self):
- return self.service_appliance_ha_mode
- #end get_service_appliance_ha_mode
-
- @property
- def id_perms(self):
- """Get id-perms for service-appliance-set.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for service-appliance-set.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for service-appliance-set.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for service-appliance-set.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_service_appliance_set_properties'):
- self._serialize_field_to_json(serialized, field_names, 'service_appliance_set_properties')
- if hasattr(self, '_service_appliance_driver'):
- self._serialize_field_to_json(serialized, field_names, 'service_appliance_driver')
- if hasattr(self, '_service_appliance_ha_mode'):
- self._serialize_field_to_json(serialized, field_names, 'service_appliance_ha_mode')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_service_appliances(self):
- return getattr(self, 'service_appliances', None)
- #end get_service_appliances
-
- def get_global_system_config_back_refs(self):
- """Return list of all global-system-configs using this service-appliance-set"""
- return getattr(self, 'global_system_config_back_refs', None)
- #end get_global_system_config_back_refs
-
- def get_loadbalancer_pool_back_refs(self):
- """Return list of all loadbalancer-pools using this service-appliance-set"""
- return getattr(self, 'loadbalancer_pool_back_refs', None)
- #end get_loadbalancer_pool_back_refs
-
- def dump(self):
- """Display service-appliance-set object in compact form."""
- print '------------ service-appliance-set ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P service_appliance_set_properties = ', self.get_service_appliance_set_properties()
- print 'P service_appliance_driver = ', self.get_service_appliance_driver()
- print 'P service_appliance_ha_mode = ', self.get_service_appliance_ha_mode()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'HAS service_appliance = ', self.get_service_appliances()
- print 'BCK loadbalancer_pool = ', self.get_loadbalancer_pool_back_refs()
- #end dump
-
-#end class ServiceApplianceSet
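# Editor's illustrative sketch (not part of the removed test files):
# ServiceApplianceSet keeps the driver and HA mode as plain strings and its
# tunables in a KeyValuePairs property. The driver path and HA mode values are
# hypothetical.
sa_set = ServiceApplianceSet(name='lb-appliances',
                             service_appliance_driver='vendor.driver.Module',
                             service_appliance_ha_mode='active-standby')
sa_set.get_fq_name_str()    # 'default-global-system-config:lb-appliances'
sa_set.serialize_to_json(['service_appliance_driver', 'service_appliance_ha_mode'])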
-
-class ConfigNode(object):
- """
- Represents a config-node configuration object.
-
- Child of:
- :class:`.GlobalSystemConfig` object
-
- Properties:
- * config-node-ip-address (IpAddressType type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
-
- Referred by:
- """
-
- prop_fields = set([u'config_node_ip_address', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'global_system_config_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, config_node_ip_address = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'config-node'
- if not name:
- name = u'default-config-node'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'global-system-config'
- self.fq_name = [u'default-global-system-config']
- self.fq_name.append(name)
-
-
- # property fields
- if config_node_ip_address:
- self._config_node_ip_address = config_node_ip_address
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (config-node)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of config-node in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of config-node as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of config-node's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of config-node's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def config_node_ip_address(self):
- """Get config-node-ip-address for config-node.
-
- :returns: IpAddressType object
-
- """
- return getattr(self, '_config_node_ip_address', None)
- #end config_node_ip_address
-
- @config_node_ip_address.setter
- def config_node_ip_address(self, config_node_ip_address):
- """Set config-node-ip-address for config-node.
-
- :param config_node_ip_address: IpAddressType object
-
- """
- self._config_node_ip_address = config_node_ip_address
- #end config_node_ip_address
-
- def set_config_node_ip_address(self, value):
- self.config_node_ip_address = value
- #end set_config_node_ip_address
-
- def get_config_node_ip_address(self):
- return self.config_node_ip_address
- #end get_config_node_ip_address
-
- @property
- def id_perms(self):
- """Get id-perms for config-node.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for config-node.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for config-node.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for config-node.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_config_node_ip_address'):
- self._serialize_field_to_json(serialized, field_names, 'config_node_ip_address')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_global_system_config_back_refs(self):
- """Return list of all global-system-configs using this config-node"""
- return getattr(self, 'global_system_config_back_refs', None)
- #end get_global_system_config_back_refs
-
- def dump(self):
- """Display config-node object in compact form."""
- print '------------ config-node ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P config_node_ip_address = ', self.get_config_node_ip_address()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- #end dump
-
-#end class ConfigNode
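# Editor's illustrative sketch (not part of the removed test files): ConfigNode
# mirrors AnalyticsNode with a config-node-ip-address property; only properties
# that were set appear in the serialized dict. The address is hypothetical.
cn = ConfigNode(name='config-1', config_node_ip_address='10.0.0.10')
cn.serialize_to_json()   # uuid (None until set), fq_name, parent_type, config_node_ip_address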
-
-class QosQueue(object):
- """
- Represents a qos-queue configuration object.
-
- Child of:
- :class:`.Project` object
-
- Properties:
- * min-bandwidth (xsd:integer type)
- * max-bandwidth (xsd:integer type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
-
- Referred by:
- * list of :class:`.QosForwardingClass` objects
- """
-
- prop_fields = set([u'min_bandwidth', u'max_bandwidth', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'project_back_refs', u'qos_forwarding_class_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, min_bandwidth = None, max_bandwidth = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'qos-queue'
- if not name:
- name = u'default-qos-queue'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'project'
- self.fq_name = [u'default-domain', u'default-project']
- self.fq_name.append(name)
-
-
- # property fields
- if min_bandwidth:
- self._min_bandwidth = min_bandwidth
- if max_bandwidth:
- self._max_bandwidth = max_bandwidth
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (qos-queue)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of qos-queue in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of qos-queue as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of qos-queue's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of qos-queue's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def min_bandwidth(self):
- """Get min-bandwidth for qos-queue.
-
- :returns: xsd:integer object
-
- """
- return getattr(self, '_min_bandwidth', None)
- #end min_bandwidth
-
- @min_bandwidth.setter
- def min_bandwidth(self, min_bandwidth):
- """Set min-bandwidth for qos-queue.
-
- :param min_bandwidth: xsd:integer object
-
- """
- self._min_bandwidth = min_bandwidth
- #end min_bandwidth
-
- def set_min_bandwidth(self, value):
- self.min_bandwidth = value
- #end set_min_bandwidth
-
- def get_min_bandwidth(self):
- return self.min_bandwidth
- #end get_min_bandwidth
-
- @property
- def max_bandwidth(self):
- """Get max-bandwidth for qos-queue.
-
- :returns: xsd:integer object
-
- """
- return getattr(self, '_max_bandwidth', None)
- #end max_bandwidth
-
- @max_bandwidth.setter
- def max_bandwidth(self, max_bandwidth):
- """Set max-bandwidth for qos-queue.
-
- :param max_bandwidth: xsd:integer object
-
- """
- self._max_bandwidth = max_bandwidth
- #end max_bandwidth
-
- def set_max_bandwidth(self, value):
- self.max_bandwidth = value
- #end set_max_bandwidth
-
- def get_max_bandwidth(self):
- return self.max_bandwidth
- #end get_max_bandwidth
-
- @property
- def id_perms(self):
- """Get id-perms for qos-queue.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for qos-queue.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for qos-queue.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for qos-queue.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_min_bandwidth'):
- self._serialize_field_to_json(serialized, field_names, 'min_bandwidth')
- if hasattr(self, '_max_bandwidth'):
- self._serialize_field_to_json(serialized, field_names, 'max_bandwidth')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_project_back_refs(self):
- """Return list of all projects using this qos-queue"""
- return getattr(self, 'project_back_refs', None)
- #end get_project_back_refs
-
- def get_qos_forwarding_class_back_refs(self):
- """Return list of all qos-forwarding-classes using this qos-queue"""
- return getattr(self, 'qos_forwarding_class_back_refs', None)
- #end get_qos_forwarding_class_back_refs
-
- def dump(self):
- """Display qos-queue object in compact form."""
- print '------------ qos-queue ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P min_bandwidth = ', self.get_min_bandwidth()
- print 'P max_bandwidth = ', self.get_max_bandwidth()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'BCK qos_forwarding_class = ', self.get_qos_forwarding_class_back_refs()
- #end dump
-
-#end class QosQueue
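A minimal usage sketch for the QosQueue class above (illustrative only; it relies on nothing beyond the class as listed): with no parent object the queue is placed under the default project, and only truthy property values passed to __init__ are stored.

qq = QosQueue(name='gold-queue', min_bandwidth=100, max_bandwidth=1000)
assert qq.get_fq_name() == [u'default-domain', u'default-project', 'gold-queue']
assert qq.get_fq_name_str() == 'default-domain:default-project:gold-queue'
qq.set_display_name('Gold queue')
data = qq.serialize_to_json()   # carries fq_name, parent_type and the two bandwidths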
-
-class VirtualMachine(object):
- """
- Represents virtual-machine configuration.
-
- Properties:
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
- * list of :class:`.VirtualMachineInterface` objects
-
- References to:
- * list of :class:`.ServiceInstance` objects
-
- Referred by:
- * list of :class:`.VirtualMachineInterface` objects
- * list of :class:`.VirtualRouter` objects
- """
-
- prop_fields = set([u'id_perms', u'display_name'])
- ref_fields = set([u'service_instance_refs'])
- backref_fields = set(['virtual_machine_interface_back_refs', 'virtual_router_back_refs'])
- children_fields = set(['virtual_machine_interfaces'])
-
- def __init__(self, name = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'virtual-machine'
- if not name:
- name = u'default-virtual-machine'
- self.name = name
- self._uuid = None
- self.fq_name = [name]
-
- # property fields
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (virtual-machine)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of virtual-machine in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of virtual-machine as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def id_perms(self):
- """Get id-perms for virtual-machine.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for virtual-machine.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for virtual-machine.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for virtual-machine.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'service_instance_refs'):
- self._serialize_field_to_json(serialized, field_names, 'service_instance_refs')
- return serialized
- #end serialize_to_json
-
- def get_virtual_machine_interfaces(self):
- return getattr(self, 'virtual_machine_interfaces', None)
- #end get_virtual_machine_interfaces
-
- def set_service_instance(self, ref_obj):
- """Set service-instance for virtual-machine.
-
- :param ref_obj: ServiceInstance object
-
- """
- self.service_instance_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.service_instance_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_service_instance
-
- def add_service_instance(self, ref_obj):
- """Add service-instance to virtual-machine.
-
- :param ref_obj: ServiceInstance object
-
- """
- refs = getattr(self, 'service_instance_refs', [])
- if not refs:
- self.service_instance_refs = []
-
- # if this ref already exists, update its uuid in place (rebinding the
- # loop variable would not change the stored list entry)
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.service_instance_refs.append(ref_info)
- #end add_service_instance
-
- def del_service_instance(self, ref_obj):
- refs = self.get_service_instance_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.service_instance_refs.remove(ref)
- return
- #end del_service_instance
-
- def set_service_instance_list(self, ref_obj_list):
- """Set service-instance list for virtual-machine.
-
- :param ref_obj_list: list of ServiceInstance object
-
- """
- self.service_instance_refs = ref_obj_list
- #end set_service_instance_list
-
- def get_service_instance_refs(self):
- """Return service-instance list for virtual-machine.
-
- :returns: list of <ServiceInstance>
-
- """
- return getattr(self, 'service_instance_refs', None)
- #end get_service_instance_refs
-
- def get_virtual_machine_interface_back_refs(self):
- """Return list of all virtual-machine-interfaces using this virtual-machine"""
- return getattr(self, 'virtual_machine_interface_back_refs', None)
- #end get_virtual_machine_interface_back_refs
-
- def get_virtual_router_back_refs(self):
- """Return list of all virtual-routers using this virtual-machine"""
- return getattr(self, 'virtual_router_back_refs', None)
- #end get_virtual_router_back_refs
-
- def dump(self):
- """Display virtual-machine object in compact form."""
- print '------------ virtual-machine ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'HAS virtual_machine_interface = ', self.get_virtual_machine_interfaces()
- print 'REF service_instance = ', self.get_service_instance_refs()
- print 'BCK virtual_machine_interface = ', self.get_virtual_machine_interface_back_refs()
- print 'BCK virtual_router = ', self.get_virtual_router_back_refs()
- #end dump
-
-#end class VirtualMachine
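A usage sketch for the VirtualMachine class above. ServiceInstance is defined elsewhere in this module, so a hypothetical stand-in with the same duck-typed surface (get_fq_name() and uuid) is used here purely for illustration.

class _StubServiceInstance(object):   # hypothetical stand-in, not a real vnc_api class
    def __init__(self, fq_name, uuid=None):
        self._fq_name = fq_name
        self.uuid = uuid
    def get_fq_name(self):
        return self._fq_name

vm = VirtualMachine(name='vm-1')
si = _StubServiceInstance([u'default-domain', u'default-project', u'si-1'],
                          uuid='c0ffee00-1111-2222-3333-444455556666')
vm.add_service_instance(si)            # stored as {'to': fq_name, 'uuid': ...}
assert vm.get_service_instance_refs()[0]['to'][-1] == u'si-1'
vm.del_service_instance(si)
assert vm.get_service_instance_refs() == []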
-
-class InterfaceRouteTable(object):
- """
- Represents interface-route-table configuration.
-
- Child of:
- :class:`.Project` object
-
- Properties:
- * interface-route-table-routes (:class:`.RouteTableType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
-
- Referred by:
- * list of :class:`.VirtualMachineInterface` objects
- """
-
- prop_fields = set([u'interface_route_table_routes', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'project_back_refs', 'virtual_machine_interface_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, interface_route_table_routes = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'interface-route-table'
- if not name:
- name = u'default-interface-route-table'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'project'
- self.fq_name = [u'default-domain', u'default-project']
- self.fq_name.append(name)
-
-
- # property fields
- if interface_route_table_routes:
- self._interface_route_table_routes = interface_route_table_routes
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (interface-route-table)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of interface-route-table in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of interface-route-table as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of interface-route-table's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of interface-route-table's parent as colon delimited string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def interface_route_table_routes(self):
- """Get interface-route-table-routes for interface-route-table.
-
- :returns: RouteTableType object
-
- """
- return getattr(self, '_interface_route_table_routes', None)
- #end interface_route_table_routes
-
- @interface_route_table_routes.setter
- def interface_route_table_routes(self, interface_route_table_routes):
- """Set interface-route-table-routes for interface-route-table.
-
- :param interface_route_table_routes: RouteTableType object
-
- """
- self._interface_route_table_routes = interface_route_table_routes
- #end interface_route_table_routes
-
- def set_interface_route_table_routes(self, value):
- self.interface_route_table_routes = value
- #end set_interface_route_table_routes
-
- def get_interface_route_table_routes(self):
- return self.interface_route_table_routes
- #end get_interface_route_table_routes
-
- @property
- def id_perms(self):
- """Get id-perms for interface-route-table.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for interface-route-table.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for interface-route-table.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for interface-route-table.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_interface_route_table_routes'):
- self._serialize_field_to_json(serialized, field_names, 'interface_route_table_routes')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_project_back_refs(self):
- """Return list of all projects using this interface-route-table"""
- return getattr(self, 'project_back_refs', None)
- #end get_project_back_refs
-
- def get_virtual_machine_interface_back_refs(self):
- """Return list of all virtual-machine-interfaces using this interface-route-table"""
- return getattr(self, 'virtual_machine_interface_back_refs', None)
- #end get_virtual_machine_interface_back_refs
-
- def dump(self):
- """Display interface-route-table object in compact form."""
- print '------------ interface-route-table ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P interface_route_table_routes = ', self.get_interface_route_table_routes()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'BCK virtual_machine_interface = ', self.get_virtual_machine_interface_back_refs()
- #end dump
-
-#end class InterfaceRouteTable
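A sketch of the alternative parenting path in InterfaceRouteTable.__init__ above: instead of a parent object, parent_type and a full fq_name (leaf name included) can be passed as keyword arguments.

irt = InterfaceRouteTable(name='rt-1', parent_type='project',
                          fq_name=[u'default-domain', u'demo-project', u'rt-1'])
assert irt.get_parent_fq_name() == [u'default-domain', u'demo-project']
assert irt.parent_name == u'demo-project'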
-
-class ServiceTemplate(object):
- """
- Represents service-template configuration.
-
- Child of:
- :class:`.Domain` object
-
- Properties:
- * service-template-properties (:class:`.ServiceTemplateType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
-
- Referred by:
- * list of :class:`.ServiceInstance` objects
- """
-
- prop_fields = set([u'service_template_properties', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'domain_back_refs', u'service_instance_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, service_template_properties = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'service-template'
- if not name:
- name = u'default-service-template'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'domain'
- self.fq_name = [u'default-domain']
- self.fq_name.append(name)
-
-
- # property fields
- if service_template_properties:
- self._service_template_properties = service_template_properties
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (service-template)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of service-template in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of service-template as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of service-template's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of service-template's parent as colon delimited string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def service_template_properties(self):
- """Get service-template-properties for service-template.
-
- :returns: ServiceTemplateType object
-
- """
- return getattr(self, '_service_template_properties', None)
- #end service_template_properties
-
- @service_template_properties.setter
- def service_template_properties(self, service_template_properties):
- """Set service-template-properties for service-template.
-
- :param service_template_properties: ServiceTemplateType object
-
- """
- self._service_template_properties = service_template_properties
- #end service_template_properties
-
- def set_service_template_properties(self, value):
- self.service_template_properties = value
- #end set_service_template_properties
-
- def get_service_template_properties(self):
- return self.service_template_properties
- #end get_service_template_properties
-
- @property
- def id_perms(self):
- """Get id-perms for service-template.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for service-template.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for service-template.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for service-template.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_service_template_properties'):
- self._serialize_field_to_json(serialized, field_names, 'service_template_properties')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_domain_back_refs(self):
- """Return list of all domains using this service-template"""
- return getattr(self, 'domain_back_refs', None)
- #end get_domain_back_refs
-
- def get_service_instance_back_refs(self):
- """Return list of all service-instances using this service-template"""
- return getattr(self, 'service_instance_back_refs', None)
- #end get_service_instance_back_refs
-
- def dump(self):
- """Display service-template object in compact form."""
- print '------------ service-template ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P service_template_properties = ', self.get_service_template_properties()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'BCK service_instance = ', self.get_service_instance_back_refs()
- #end dump
-
-#end class ServiceTemplate
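A sketch of field-filtered serialization with the ServiceTemplate class above: serialize_to_json() always emits uuid, while fq_name, parent_type and property fields appear only when listed in field_names.

st = ServiceTemplate(name='nat-template', display_name='NAT template')
assert st.get_parent_fq_name_str() == 'default-domain'
subset = st.serialize_to_json(field_names=['display_name'])
assert set(subset) == set(['uuid', 'display_name'])   # fq_name/parent_type filtered out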
-
-class VirtualIp(object):
- """
- Represents virtual-ip configuration.
-
- Child of:
- :class:`.Project` object
-
- Properties:
- * virtual-ip-properties (:class:`.VirtualIpType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
- * list of :class:`.LoadbalancerPool` objects
- * list of :class:`.VirtualMachineInterface` objects
-
- Referred by:
- """
-
- prop_fields = set([u'virtual_ip_properties', u'id_perms', u'display_name'])
- ref_fields = set([u'loadbalancer_pool_refs', 'virtual_machine_interface_refs'])
- backref_fields = set([u'project_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, virtual_ip_properties = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'virtual-ip'
- if not name:
- name = u'default-virtual-ip'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'project'
- self.fq_name = [u'default-domain', u'default-project']
- self.fq_name.append(name)
-
-
- # property fields
- if virtual_ip_properties:
- self._virtual_ip_properties = virtual_ip_properties
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (virtual-ip)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of virtual-ip in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of virtual-ip as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of virtual-ip's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of virtual-ip's parent as colon delimited string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def virtual_ip_properties(self):
- """Get virtual-ip-properties for virtual-ip.
-
- :returns: VirtualIpType object
-
- """
- return getattr(self, '_virtual_ip_properties', None)
- #end virtual_ip_properties
-
- @virtual_ip_properties.setter
- def virtual_ip_properties(self, virtual_ip_properties):
- """Set virtual-ip-properties for virtual-ip.
-
- :param virtual_ip_properties: VirtualIpType object
-
- """
- self._virtual_ip_properties = virtual_ip_properties
- #end virtual_ip_properties
-
- def set_virtual_ip_properties(self, value):
- self.virtual_ip_properties = value
- #end set_virtual_ip_properties
-
- def get_virtual_ip_properties(self):
- return self.virtual_ip_properties
- #end get_virtual_ip_properties
-
- @property
- def id_perms(self):
- """Get id-perms for virtual-ip.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for virtual-ip.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for virtual-ip.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for virtual-ip.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_virtual_ip_properties'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_ip_properties')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'loadbalancer_pool_refs'):
- self._serialize_field_to_json(serialized, field_names, 'loadbalancer_pool_refs')
- if hasattr(self, 'virtual_machine_interface_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_interface_refs')
- return serialized
- #end serialize_to_json
-
- def set_loadbalancer_pool(self, ref_obj):
- """Set loadbalancer-pool for virtual-ip.
-
- :param ref_obj: LoadbalancerPool object
-
- """
- self.loadbalancer_pool_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.loadbalancer_pool_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_loadbalancer_pool
-
- def add_loadbalancer_pool(self, ref_obj):
- """Add loadbalancer-pool to virtual-ip.
-
- :param ref_obj: LoadbalancerPool object
-
- """
- refs = getattr(self, 'loadbalancer_pool_refs', [])
- if not refs:
- self.loadbalancer_pool_refs = []
-
- # if this ref already exists, update its uuid in place (rebinding the
- # loop variable would not change the stored list entry)
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.loadbalancer_pool_refs.append(ref_info)
- #end add_loadbalancer_pool
-
- def del_loadbalancer_pool(self, ref_obj):
- refs = self.get_loadbalancer_pool_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.loadbalancer_pool_refs.remove(ref)
- return
- #end del_loadbalancer_pool
-
- def set_loadbalancer_pool_list(self, ref_obj_list):
- """Set loadbalancer-pool list for virtual-ip.
-
- :param ref_obj_list: list of LoadbalancerPool object
-
- """
- self.loadbalancer_pool_refs = ref_obj_list
- #end set_loadbalancer_pool_list
-
- def get_loadbalancer_pool_refs(self):
- """Return loadbalancer-pool list for virtual-ip.
-
- :returns: list of <LoadbalancerPool>
-
- """
- return getattr(self, 'loadbalancer_pool_refs', None)
- #end get_loadbalancer_pool_refs
-
- def set_virtual_machine_interface(self, ref_obj):
- """Set virtual-machine-interface for virtual-ip.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_machine_interface_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, ref_obj):
- """Add virtual-machine-interface to virtual-ip.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- refs = getattr(self, 'virtual_machine_interface_refs', [])
- if not refs:
- self.virtual_machine_interface_refs = []
-
- # if this ref already exists, update its uuid in place (rebinding the
- # loop variable would not change the stored list entry)
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_machine_interface_refs.append(ref_info)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, ref_obj):
- refs = self.get_virtual_machine_interface_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_machine_interface_refs.remove(ref)
- return
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, ref_obj_list):
- """Set virtual-machine-interface list for virtual-ip.
-
- :param ref_obj_list: list of VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = ref_obj_list
- #end set_virtual_machine_interface_list
-
- def get_virtual_machine_interface_refs(self):
- """Return virtual-machine-interface list for virtual-ip.
-
- :returns: list of <VirtualMachineInterface>
-
- """
- return getattr(self, 'virtual_machine_interface_refs', None)
- #end get_virtual_machine_interface_refs
-
- def get_project_back_refs(self):
- """Return list of all projects using this virtual-ip"""
- return getattr(self, 'project_back_refs', None)
- #end get_project_back_refs
-
- def dump(self):
- """Display virtual-ip object in compact form."""
- print '------------ virtual-ip ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P virtual_ip_properties = ', self.get_virtual_ip_properties()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF loadbalancer_pool = ', self.get_loadbalancer_pool_refs()
- print 'REF virtual_machine_interface = ', self.get_virtual_machine_interface_refs()
- #end dump
-
-#end class VirtualIp
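A sketch for the VirtualIp class above: as this generated code stores references, each ref is a dict with a 'to' fq_name and an optional 'uuid', and set_loadbalancer_pool_list() accepts such dicts directly.

vip = VirtualIp(name='vip-1')
vip.set_loadbalancer_pool_list(
    [{'to': [u'default-domain', u'default-project', u'pool-1']}])
assert vip.get_loadbalancer_pool_refs()[0]['to'][-1] == u'pool-1'
assert 'loadbalancer_pool_refs' in vip.serialize_to_json()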
-
-class LoadbalancerMember(object):
- """
- Represents loadbalancer-member configuration.
-
- Child of:
- :class:`.LoadbalancerPool` object
-
- Properties:
- * loadbalancer-member-properties (:class:`.LoadbalancerMemberType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
-
- Referred by:
- """
-
- prop_fields = set([u'loadbalancer_member_properties', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'loadbalancer_pool_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, loadbalancer_member_properties = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'loadbalancer-member'
- if not name:
- name = u'default-loadbalancer-member'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'loadbalancer-pool'
- self.fq_name = [u'default-domain', u'default-project', u'default-loadbalancer-pool']
- self.fq_name.append(name)
-
-
- # property fields
- if loadbalancer_member_properties:
- self._loadbalancer_member_properties = loadbalancer_member_properties
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (loadbalancer-member)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of loadbalancer-member in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of loadbalancer-member as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of loadbalancer-member's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of loadbalancer-member's parent as colon delimited string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def loadbalancer_member_properties(self):
- """Get loadbalancer-member-properties for loadbalancer-member.
-
- :returns: LoadbalancerMemberType object
-
- """
- return getattr(self, '_loadbalancer_member_properties', None)
- #end loadbalancer_member_properties
-
- @loadbalancer_member_properties.setter
- def loadbalancer_member_properties(self, loadbalancer_member_properties):
- """Set loadbalancer-member-properties for loadbalancer-member.
-
- :param loadbalancer_member_properties: LoadbalancerMemberType object
-
- """
- self._loadbalancer_member_properties = loadbalancer_member_properties
- #end loadbalancer_member_properties
-
- def set_loadbalancer_member_properties(self, value):
- self.loadbalancer_member_properties = value
- #end set_loadbalancer_member_properties
-
- def get_loadbalancer_member_properties(self):
- return self.loadbalancer_member_properties
- #end get_loadbalancer_member_properties
-
- @property
- def id_perms(self):
- """Get id-perms for loadbalancer-member.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for loadbalancer-member.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for loadbalancer-member.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for loadbalancer-member.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_loadbalancer_member_properties'):
- self._serialize_field_to_json(serialized, field_names, 'loadbalancer_member_properties')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_loadbalancer_pool_back_refs(self):
- """Return list of all loadbalancer-pools using this loadbalancer-member"""
- return getattr(self, 'loadbalancer_pool_back_refs', None)
- #end get_loadbalancer_pool_back_refs
-
- def dump(self):
- """Display loadbalancer-member object in compact form."""
- print '------------ loadbalancer-member ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P loadbalancer_member_properties = ', self.get_loadbalancer_member_properties()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- #end dump
-
-#end class LoadbalancerMember
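A sketch of the default placement of the LoadbalancerMember class above: with no parent supplied, the member lands under the default loadbalancer-pool of the default project.

member = LoadbalancerMember(name='member-1')
assert member.get_type() == 'loadbalancer-member'
assert member.get_parent_fq_name() == [u'default-domain', u'default-project',
                                       u'default-loadbalancer-pool']
assert member.get_fq_name_str().endswith(':member-1')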
-
-class SecurityGroup(object):
- """
- Represents security-group configuration.
-
- Child of:
- :class:`.Project` object
-
- Properties:
- * security-group-id (xsd:string type)
- * configured-security-group-id (xsd:integer type)
- * security-group-entries (:class:`.PolicyEntriesType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
- * list of :class:`.AccessControlList` objects
-
- References to:
-
- Referred by:
- * list of :class:`.VirtualMachineInterface` objects
- """
-
- prop_fields = set([u'security_group_id', u'configured_security_group_id', u'security_group_entries', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'project_back_refs', 'virtual_machine_interface_back_refs'])
- children_fields = set([u'access_control_lists'])
-
- def __init__(self, name = None, parent_obj = None, security_group_id = None, configured_security_group_id = None, security_group_entries = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'security-group'
- if not name:
- name = u'default-security-group'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'project'
- self.fq_name = [u'default-domain', u'default-project']
- self.fq_name.append(name)
-
-
- # property fields
- if security_group_id:
- self._security_group_id = security_group_id
- if configured_security_group_id:
- self._configured_security_group_id = configured_security_group_id
- if security_group_entries:
- self._security_group_entries = security_group_entries
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (security-group)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of security-group in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of security-group as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of security-group's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of security-group's parent as colon delimited string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def security_group_id(self):
- """Get security-group-id for security-group.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_security_group_id', None)
- #end security_group_id
-
- @security_group_id.setter
- def security_group_id(self, security_group_id):
- """Set security-group-id for security-group.
-
- :param security_group_id: xsd:string object
-
- """
- self._security_group_id = security_group_id
- #end security_group_id
-
- def set_security_group_id(self, value):
- self.security_group_id = value
- #end set_security_group_id
-
- def get_security_group_id(self):
- return self.security_group_id
- #end get_security_group_id
-
- @property
- def configured_security_group_id(self):
- """Get configured-security-group-id for security-group.
-
- :returns: xsd:integer object
-
- """
- return getattr(self, '_configured_security_group_id', None)
- #end configured_security_group_id
-
- @configured_security_group_id.setter
- def configured_security_group_id(self, configured_security_group_id):
- """Set configured-security-group-id for security-group.
-
- :param configured_security_group_id: xsd:integer object
-
- """
- self._configured_security_group_id = configured_security_group_id
- #end configured_security_group_id
-
- def set_configured_security_group_id(self, value):
- self.configured_security_group_id = value
- #end set_configured_security_group_id
-
- def get_configured_security_group_id(self):
- return self.configured_security_group_id
- #end get_configured_security_group_id
-
- @property
- def security_group_entries(self):
- """Get security-group-entries for security-group.
-
- :returns: PolicyEntriesType object
-
- """
- return getattr(self, '_security_group_entries', None)
- #end security_group_entries
-
- @security_group_entries.setter
- def security_group_entries(self, security_group_entries):
- """Set security-group-entries for security-group.
-
- :param security_group_entries: PolicyEntriesType object
-
- """
- self._security_group_entries = security_group_entries
- #end security_group_entries
-
- def set_security_group_entries(self, value):
- self.security_group_entries = value
- #end set_security_group_entries
-
- def get_security_group_entries(self):
- return self.security_group_entries
- #end get_security_group_entries
-
- @property
- def id_perms(self):
- """Get id-perms for security-group.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for security-group.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for security-group.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for security-group.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_security_group_id'):
- self._serialize_field_to_json(serialized, field_names, 'security_group_id')
- if hasattr(self, '_configured_security_group_id'):
- self._serialize_field_to_json(serialized, field_names, 'configured_security_group_id')
- if hasattr(self, '_security_group_entries'):
- self._serialize_field_to_json(serialized, field_names, 'security_group_entries')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_access_control_lists(self):
- return getattr(self, 'access_control_lists', None)
- #end get_access_control_lists
-
- def get_project_back_refs(self):
- """Return list of all projects using this security-group"""
- return getattr(self, 'project_back_refs', None)
- #end get_project_back_refs
-
- def get_virtual_machine_interface_back_refs(self):
- """Return list of all virtual-machine-interfaces using this security-group"""
- return getattr(self, 'virtual_machine_interface_back_refs', None)
- #end get_virtual_machine_interface_back_refs
-
- def dump(self):
- """Display security-group object in compact form."""
- print '------------ security-group ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P security_group_id = ', self.get_security_group_id()
- print 'P configured_security_group_id = ', self.get_configured_security_group_id()
- print 'P security_group_entries = ', self.get_security_group_entries()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'HAS access_control_list = ', self.get_access_control_lists()
- print 'BCK virtual_machine_interface = ', self.get_virtual_machine_interface_back_refs()
- #end dump
-
-#end class SecurityGroup
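A sketch of a quirk in the SecurityGroup class above (shared by the other __init__ methods in this module): only truthy property values are stored by the constructor, so an id of 0 has to be applied through the property or setter.

sg = SecurityGroup(name='web-sg', configured_security_group_id=0)
assert sg.get_configured_security_group_id() is None   # 0 was skipped by __init__
sg.set_configured_security_group_id(0)
assert sg.get_configured_security_group_id() == 0
assert sg.get_fq_name() == [u'default-domain', u'default-project', 'web-sg']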
-
-class ProviderAttachment(object):
- """
- Represents provider-attachment configuration.
-
- Properties:
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
- * list of :class:`.VirtualRouter` objects
-
- Referred by:
- """
-
- prop_fields = set([u'id_perms', u'display_name'])
- ref_fields = set(['virtual_router_refs'])
- backref_fields = set(['customer_attachment_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'provider-attachment'
- if not name:
- name = u'default-provider-attachment'
- self.name = name
- self._uuid = None
- self.fq_name = [name]
-
- # property fields
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (provider-attachment)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of provider-attachment in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of provider-attachment as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def id_perms(self):
- """Get id-perms for provider-attachment.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for provider-attachment.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for provider-attachment.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for provider-attachment.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'virtual_router_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_router_refs')
- return serialized
- #end serialize_to_json
-
- def set_virtual_router(self, ref_obj):
- """Set virtual-router for provider-attachment.
-
- :param ref_obj: VirtualRouter object
-
- """
- self.virtual_router_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_router_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_router
-
- def add_virtual_router(self, ref_obj):
- """Add virtual-router to provider-attachment.
-
- :param ref_obj: VirtualRouter object
-
- """
- refs = getattr(self, 'virtual_router_refs', [])
- if not refs:
- self.virtual_router_refs = []
-
- # if this ref already exists, update its uuid in place (rebinding the
- # loop variable would not change the stored list entry)
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- if ref_obj.uuid:
- ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_router_refs.append(ref_info)
- #end add_virtual_router
-
- def del_virtual_router(self, ref_obj):
- refs = self.get_virtual_router_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_router_refs.remove(ref)
- return
- #end del_virtual_router
-
- def set_virtual_router_list(self, ref_obj_list):
- """Set virtual-router list for provider-attachment.
-
- :param ref_obj_list: list of VirtualRouter object
-
- """
- self.virtual_router_refs = ref_obj_list
- #end set_virtual_router_list
-
- def get_virtual_router_refs(self):
- """Return virtual-router list for provider-attachment.
-
- :returns: list of <VirtualRouter>
-
- """
- return getattr(self, 'virtual_router_refs', None)
- #end get_virtual_router_refs
-
- def get_customer_attachment_back_refs(self):
- """Return list of all customer-attachments using this provider-attachment"""
- return getattr(self, 'customer_attachment_back_refs', None)
- #end get_customer_attachment_back_refs
-
- def dump(self):
- """Display provider-attachment object in compact form."""
- print '------------ provider-attachment ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF virtual_router = ', self.get_virtual_router_refs()
- #end dump
-
-#end class ProviderAttachment
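-
-# Minimal usage sketch for the virtual-router reference helpers above. It
-# assumes a VirtualRouter class with the same fq_name/uuid interface exists
-# elsewhere in this module and that ProviderAttachment, like its sibling
-# classes, can be constructed with just a name; the helper name below is
-# purely illustrative.
-def _example_provider_attachment():
-    vrouter = VirtualRouter('vrouter-1')
-    attach = ProviderAttachment('provider-attach-1')
-    attach.set_virtual_router(vrouter)   # single-ref setter replaces the ref list
-    attach.add_virtual_router(vrouter)   # no-op for an fq_name already referenced
-    # each ref is a plain dict: {'to': <fq_name list>} plus 'uuid' when known
-    return attach.get_virtual_router_refs()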
-
-class VirtualMachineInterface(object):
- """
-    Represents virtual-machine-interface configuration.
-
- Child of:
- :class:`.VirtualMachine` object OR
-        :class:`.Project` object
-
- Properties:
- * virtual-machine-interface-mac-addresses (:class:`.MacAddressesType` type)
- * virtual-machine-interface-dhcp-option-list (:class:`.DhcpOptionsListType` type)
- * virtual-machine-interface-host-routes (:class:`.RouteTableType` type)
- * virtual-machine-interface-allowed-address-pairs (:class:`.AllowedAddressPairs` type)
- * vrf-assign-table (:class:`.VrfAssignTableType` type)
- * virtual-machine-interface-device-owner (xsd:string type)
- * virtual-machine-interface-properties (:class:`.VirtualMachineInterfacePropertiesType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
- * list of :class:`.QosForwardingClass` objects
- * list of :class:`.SecurityGroup` objects
- * list of :class:`.VirtualMachineInterface` objects
- * list of :class:`.VirtualMachine` objects
- * list of :class:`.VirtualNetwork` objects
- * list of (:class:`.RoutingInstance` object, :class:`.PolicyBasedForwardingRuleType` attribute)
- * list of :class:`.InterfaceRouteTable` objects
-
- Referred by:
- * list of :class:`.VirtualMachineInterface` objects
- * list of :class:`.InstanceIp` objects
- * list of :class:`.Subnet` objects
- * list of :class:`.FloatingIp` objects
- * list of :class:`.LogicalInterface` objects
- * list of :class:`.CustomerAttachment` objects
- * list of :class:`.LogicalRouter` objects
- * list of :class:`.LoadbalancerPool` objects
- * list of :class:`.VirtualIp` objects
- """
-
- prop_fields = set([u'virtual_machine_interface_mac_addresses', u'virtual_machine_interface_dhcp_option_list', u'virtual_machine_interface_host_routes', u'virtual_machine_interface_allowed_address_pairs', u'vrf_assign_table', u'virtual_machine_interface_device_owner', u'virtual_machine_interface_properties', u'id_perms', u'display_name'])
- ref_fields = set([u'qos_forwarding_class_refs', u'security_group_refs', 'virtual_machine_interface_refs', u'virtual_machine_refs', u'virtual_network_refs', 'routing_instance_refs', u'interface_route_table_refs'])
- backref_fields = set(['virtual_machine_interface_back_refs', u'virtual_machine_back_refs', u'project_back_refs', u'instance_ip_back_refs', u'subnet_back_refs', u'floating_ip_back_refs', u'logical_interface_back_refs', 'customer_attachment_back_refs', u'logical_router_back_refs', u'loadbalancer_pool_back_refs', u'virtual_ip_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, virtual_machine_interface_mac_addresses = None, virtual_machine_interface_dhcp_option_list = None, virtual_machine_interface_host_routes = None, virtual_machine_interface_allowed_address_pairs = None, vrf_assign_table = None, virtual_machine_interface_device_owner = None, virtual_machine_interface_properties = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'virtual-machine-interface'
- if not name:
- name = u'default-virtual-machine-interface'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- # if obj constructed from within server, ignore if parent not specified
-            if not kwargs_parent_type:
- raise AmbiguousParentError("[[u'default-virtual-machine'], [u'default-domain', u'default-project']]")
-
- # property fields
- if virtual_machine_interface_mac_addresses:
- self._virtual_machine_interface_mac_addresses = virtual_machine_interface_mac_addresses
- if virtual_machine_interface_dhcp_option_list:
- self._virtual_machine_interface_dhcp_option_list = virtual_machine_interface_dhcp_option_list
- if virtual_machine_interface_host_routes:
- self._virtual_machine_interface_host_routes = virtual_machine_interface_host_routes
- if virtual_machine_interface_allowed_address_pairs:
- self._virtual_machine_interface_allowed_address_pairs = virtual_machine_interface_allowed_address_pairs
- if vrf_assign_table:
- self._vrf_assign_table = vrf_assign_table
- if virtual_machine_interface_device_owner:
- self._virtual_machine_interface_device_owner = virtual_machine_interface_device_owner
- if virtual_machine_interface_properties:
- self._virtual_machine_interface_properties = virtual_machine_interface_properties
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (virtual-machine-interface)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of virtual-machine-interface in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of virtual-machine-interface as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of virtual-machine-interface's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
-        """Return FQN of virtual-machine-interface's parent as colon delimited string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def virtual_machine_interface_mac_addresses(self):
- """Get virtual-machine-interface-mac-addresses for virtual-machine-interface.
-
- :returns: MacAddressesType object
-
- """
- return getattr(self, '_virtual_machine_interface_mac_addresses', None)
- #end virtual_machine_interface_mac_addresses
-
- @virtual_machine_interface_mac_addresses.setter
- def virtual_machine_interface_mac_addresses(self, virtual_machine_interface_mac_addresses):
- """Set virtual-machine-interface-mac-addresses for virtual-machine-interface.
-
- :param virtual_machine_interface_mac_addresses: MacAddressesType object
-
- """
- self._virtual_machine_interface_mac_addresses = virtual_machine_interface_mac_addresses
- #end virtual_machine_interface_mac_addresses
-
- def set_virtual_machine_interface_mac_addresses(self, value):
- self.virtual_machine_interface_mac_addresses = value
- #end set_virtual_machine_interface_mac_addresses
-
- def get_virtual_machine_interface_mac_addresses(self):
- return self.virtual_machine_interface_mac_addresses
- #end get_virtual_machine_interface_mac_addresses
-
- @property
- def virtual_machine_interface_dhcp_option_list(self):
- """Get virtual-machine-interface-dhcp-option-list for virtual-machine-interface.
-
- :returns: DhcpOptionsListType object
-
- """
- return getattr(self, '_virtual_machine_interface_dhcp_option_list', None)
- #end virtual_machine_interface_dhcp_option_list
-
- @virtual_machine_interface_dhcp_option_list.setter
- def virtual_machine_interface_dhcp_option_list(self, virtual_machine_interface_dhcp_option_list):
- """Set virtual-machine-interface-dhcp-option-list for virtual-machine-interface.
-
- :param virtual_machine_interface_dhcp_option_list: DhcpOptionsListType object
-
- """
- self._virtual_machine_interface_dhcp_option_list = virtual_machine_interface_dhcp_option_list
- #end virtual_machine_interface_dhcp_option_list
-
- def set_virtual_machine_interface_dhcp_option_list(self, value):
- self.virtual_machine_interface_dhcp_option_list = value
- #end set_virtual_machine_interface_dhcp_option_list
-
- def get_virtual_machine_interface_dhcp_option_list(self):
- return self.virtual_machine_interface_dhcp_option_list
- #end get_virtual_machine_interface_dhcp_option_list
-
- @property
- def virtual_machine_interface_host_routes(self):
- """Get virtual-machine-interface-host-routes for virtual-machine-interface.
-
- :returns: RouteTableType object
-
- """
- return getattr(self, '_virtual_machine_interface_host_routes', None)
- #end virtual_machine_interface_host_routes
-
- @virtual_machine_interface_host_routes.setter
- def virtual_machine_interface_host_routes(self, virtual_machine_interface_host_routes):
- """Set virtual-machine-interface-host-routes for virtual-machine-interface.
-
- :param virtual_machine_interface_host_routes: RouteTableType object
-
- """
- self._virtual_machine_interface_host_routes = virtual_machine_interface_host_routes
- #end virtual_machine_interface_host_routes
-
- def set_virtual_machine_interface_host_routes(self, value):
- self.virtual_machine_interface_host_routes = value
- #end set_virtual_machine_interface_host_routes
-
- def get_virtual_machine_interface_host_routes(self):
- return self.virtual_machine_interface_host_routes
- #end get_virtual_machine_interface_host_routes
-
- @property
- def virtual_machine_interface_allowed_address_pairs(self):
- """Get virtual-machine-interface-allowed-address-pairs for virtual-machine-interface.
-
- :returns: AllowedAddressPairs object
-
- """
- return getattr(self, '_virtual_machine_interface_allowed_address_pairs', None)
- #end virtual_machine_interface_allowed_address_pairs
-
- @virtual_machine_interface_allowed_address_pairs.setter
- def virtual_machine_interface_allowed_address_pairs(self, virtual_machine_interface_allowed_address_pairs):
- """Set virtual-machine-interface-allowed-address-pairs for virtual-machine-interface.
-
- :param virtual_machine_interface_allowed_address_pairs: AllowedAddressPairs object
-
- """
- self._virtual_machine_interface_allowed_address_pairs = virtual_machine_interface_allowed_address_pairs
- #end virtual_machine_interface_allowed_address_pairs
-
- def set_virtual_machine_interface_allowed_address_pairs(self, value):
- self.virtual_machine_interface_allowed_address_pairs = value
- #end set_virtual_machine_interface_allowed_address_pairs
-
- def get_virtual_machine_interface_allowed_address_pairs(self):
- return self.virtual_machine_interface_allowed_address_pairs
- #end get_virtual_machine_interface_allowed_address_pairs
-
- @property
- def vrf_assign_table(self):
- """Get vrf-assign-table for virtual-machine-interface.
-
- :returns: VrfAssignTableType object
-
- """
- return getattr(self, '_vrf_assign_table', None)
- #end vrf_assign_table
-
- @vrf_assign_table.setter
- def vrf_assign_table(self, vrf_assign_table):
- """Set vrf-assign-table for virtual-machine-interface.
-
- :param vrf_assign_table: VrfAssignTableType object
-
- """
- self._vrf_assign_table = vrf_assign_table
- #end vrf_assign_table
-
- def set_vrf_assign_table(self, value):
- self.vrf_assign_table = value
- #end set_vrf_assign_table
-
- def get_vrf_assign_table(self):
- return self.vrf_assign_table
- #end get_vrf_assign_table
-
- @property
- def virtual_machine_interface_device_owner(self):
- """Get virtual-machine-interface-device-owner for virtual-machine-interface.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_virtual_machine_interface_device_owner', None)
- #end virtual_machine_interface_device_owner
-
- @virtual_machine_interface_device_owner.setter
- def virtual_machine_interface_device_owner(self, virtual_machine_interface_device_owner):
- """Set virtual-machine-interface-device-owner for virtual-machine-interface.
-
- :param virtual_machine_interface_device_owner: xsd:string object
-
- """
- self._virtual_machine_interface_device_owner = virtual_machine_interface_device_owner
- #end virtual_machine_interface_device_owner
-
- def set_virtual_machine_interface_device_owner(self, value):
- self.virtual_machine_interface_device_owner = value
- #end set_virtual_machine_interface_device_owner
-
- def get_virtual_machine_interface_device_owner(self):
- return self.virtual_machine_interface_device_owner
- #end get_virtual_machine_interface_device_owner
-
- @property
- def virtual_machine_interface_properties(self):
- """Get virtual-machine-interface-properties for virtual-machine-interface.
-
- :returns: VirtualMachineInterfacePropertiesType object
-
- """
- return getattr(self, '_virtual_machine_interface_properties', None)
- #end virtual_machine_interface_properties
-
- @virtual_machine_interface_properties.setter
- def virtual_machine_interface_properties(self, virtual_machine_interface_properties):
- """Set virtual-machine-interface-properties for virtual-machine-interface.
-
- :param virtual_machine_interface_properties: VirtualMachineInterfacePropertiesType object
-
- """
- self._virtual_machine_interface_properties = virtual_machine_interface_properties
- #end virtual_machine_interface_properties
-
- def set_virtual_machine_interface_properties(self, value):
- self.virtual_machine_interface_properties = value
- #end set_virtual_machine_interface_properties
-
- def get_virtual_machine_interface_properties(self):
- return self.virtual_machine_interface_properties
- #end get_virtual_machine_interface_properties
-
- @property
- def id_perms(self):
- """Get id-perms for virtual-machine-interface.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for virtual-machine-interface.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for virtual-machine-interface.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for virtual-machine-interface.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_virtual_machine_interface_mac_addresses'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_interface_mac_addresses')
- if hasattr(self, '_virtual_machine_interface_dhcp_option_list'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_interface_dhcp_option_list')
- if hasattr(self, '_virtual_machine_interface_host_routes'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_interface_host_routes')
- if hasattr(self, '_virtual_machine_interface_allowed_address_pairs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_interface_allowed_address_pairs')
- if hasattr(self, '_vrf_assign_table'):
- self._serialize_field_to_json(serialized, field_names, 'vrf_assign_table')
- if hasattr(self, '_virtual_machine_interface_device_owner'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_interface_device_owner')
- if hasattr(self, '_virtual_machine_interface_properties'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_interface_properties')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'qos_forwarding_class_refs'):
- self._serialize_field_to_json(serialized, field_names, 'qos_forwarding_class_refs')
- if hasattr(self, 'security_group_refs'):
- self._serialize_field_to_json(serialized, field_names, 'security_group_refs')
- if hasattr(self, 'virtual_machine_interface_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_interface_refs')
- if hasattr(self, 'virtual_machine_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_refs')
- if hasattr(self, 'virtual_network_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_network_refs')
- if hasattr(self, 'routing_instance_refs'):
- self._serialize_field_to_json(serialized, field_names, 'routing_instance_refs')
- if hasattr(self, 'interface_route_table_refs'):
- self._serialize_field_to_json(serialized, field_names, 'interface_route_table_refs')
- return serialized
- #end serialize_to_json
-
- def set_qos_forwarding_class(self, ref_obj):
- """Set qos-forwarding-class for virtual-machine-interface.
-
- :param ref_obj: QosForwardingClass object
-
- """
- self.qos_forwarding_class_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.qos_forwarding_class_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_qos_forwarding_class
-
- def add_qos_forwarding_class(self, ref_obj):
- """Add qos-forwarding-class to virtual-machine-interface.
-
- :param ref_obj: QosForwardingClass object
-
- """
- refs = getattr(self, 'qos_forwarding_class_refs', [])
- if not refs:
- self.qos_forwarding_class_refs = []
-
- # if ref already exists, update any attr with it
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
-                # refresh the existing entry in place; rebinding 'ref' would not update the stored list
-                if ref_obj.uuid:
-                    ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.qos_forwarding_class_refs.append(ref_info)
- #end add_qos_forwarding_class
-
- def del_qos_forwarding_class(self, ref_obj):
- refs = self.get_qos_forwarding_class_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.qos_forwarding_class_refs.remove(ref)
- return
- #end del_qos_forwarding_class
-
- def set_qos_forwarding_class_list(self, ref_obj_list):
- """Set qos-forwarding-class list for virtual-machine-interface.
-
- :param ref_obj_list: list of QosForwardingClass object
-
- """
- self.qos_forwarding_class_refs = ref_obj_list
- #end set_qos_forwarding_class_list
-
- def get_qos_forwarding_class_refs(self):
- """Return qos-forwarding-class list for virtual-machine-interface.
-
- :returns: list of <QosForwardingClass>
-
- """
- return getattr(self, 'qos_forwarding_class_refs', None)
- #end get_qos_forwarding_class_refs
-
- def set_security_group(self, ref_obj):
- """Set security-group for virtual-machine-interface.
-
- :param ref_obj: SecurityGroup object
-
- """
- self.security_group_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.security_group_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_security_group
-
- def add_security_group(self, ref_obj):
- """Add security-group to virtual-machine-interface.
-
- :param ref_obj: SecurityGroup object
-
- """
- refs = getattr(self, 'security_group_refs', [])
- if not refs:
- self.security_group_refs = []
-
- # if ref already exists, update any attr with it
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
-                # refresh the existing entry in place; rebinding 'ref' would not update the stored list
-                if ref_obj.uuid:
-                    ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.security_group_refs.append(ref_info)
- #end add_security_group
-
- def del_security_group(self, ref_obj):
- refs = self.get_security_group_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.security_group_refs.remove(ref)
- return
- #end del_security_group
-
- def set_security_group_list(self, ref_obj_list):
- """Set security-group list for virtual-machine-interface.
-
- :param ref_obj_list: list of SecurityGroup object
-
- """
- self.security_group_refs = ref_obj_list
- #end set_security_group_list
-
- def get_security_group_refs(self):
- """Return security-group list for virtual-machine-interface.
-
- :returns: list of <SecurityGroup>
-
- """
- return getattr(self, 'security_group_refs', None)
- #end get_security_group_refs
-
- def set_virtual_machine_interface(self, ref_obj):
- """Set virtual-machine-interface for virtual-machine-interface.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_machine_interface_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, ref_obj):
- """Add virtual-machine-interface to virtual-machine-interface.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- refs = getattr(self, 'virtual_machine_interface_refs', [])
- if not refs:
- self.virtual_machine_interface_refs = []
-
- # if ref already exists, update any attr with it
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
-                # refresh the existing entry in place; rebinding 'ref' would not update the stored list
-                if ref_obj.uuid:
-                    ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_machine_interface_refs.append(ref_info)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, ref_obj):
- refs = self.get_virtual_machine_interface_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_machine_interface_refs.remove(ref)
- return
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, ref_obj_list):
- """Set virtual-machine-interface list for virtual-machine-interface.
-
- :param ref_obj_list: list of VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = ref_obj_list
- #end set_virtual_machine_interface_list
-
- def get_virtual_machine_interface_refs(self):
- """Return virtual-machine-interface list for virtual-machine-interface.
-
- :returns: list of <VirtualMachineInterface>
-
- """
- return getattr(self, 'virtual_machine_interface_refs', None)
- #end get_virtual_machine_interface_refs
-
- def set_virtual_machine(self, ref_obj):
- """Set virtual-machine for virtual-machine-interface.
-
- :param ref_obj: VirtualMachine object
-
- """
- self.virtual_machine_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_machine_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_machine
-
- def add_virtual_machine(self, ref_obj):
- """Add virtual-machine to virtual-machine-interface.
-
- :param ref_obj: VirtualMachine object
-
- """
- refs = getattr(self, 'virtual_machine_refs', [])
- if not refs:
- self.virtual_machine_refs = []
-
- # if ref already exists, update any attr with it
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
-                # refresh the existing entry in place; rebinding 'ref' would not update the stored list
-                if ref_obj.uuid:
-                    ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_machine_refs.append(ref_info)
- #end add_virtual_machine
-
- def del_virtual_machine(self, ref_obj):
- refs = self.get_virtual_machine_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_machine_refs.remove(ref)
- return
- #end del_virtual_machine
-
- def set_virtual_machine_list(self, ref_obj_list):
- """Set virtual-machine list for virtual-machine-interface.
-
- :param ref_obj_list: list of VirtualMachine object
-
- """
- self.virtual_machine_refs = ref_obj_list
- #end set_virtual_machine_list
-
- def get_virtual_machine_refs(self):
- """Return virtual-machine list for virtual-machine-interface.
-
- :returns: list of <VirtualMachine>
-
- """
- return getattr(self, 'virtual_machine_refs', None)
- #end get_virtual_machine_refs
-
- def set_virtual_network(self, ref_obj):
- """Set virtual-network for virtual-machine-interface.
-
- :param ref_obj: VirtualNetwork object
-
- """
- self.virtual_network_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_network_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_network
-
- def add_virtual_network(self, ref_obj):
- """Add virtual-network to virtual-machine-interface.
-
- :param ref_obj: VirtualNetwork object
-
- """
- refs = getattr(self, 'virtual_network_refs', [])
- if not refs:
- self.virtual_network_refs = []
-
- # if ref already exists, update any attr with it
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
-                # refresh the existing entry in place; rebinding 'ref' would not update the stored list
-                if ref_obj.uuid:
-                    ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_network_refs.append(ref_info)
- #end add_virtual_network
-
- def del_virtual_network(self, ref_obj):
- refs = self.get_virtual_network_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_network_refs.remove(ref)
- return
- #end del_virtual_network
-
- def set_virtual_network_list(self, ref_obj_list):
- """Set virtual-network list for virtual-machine-interface.
-
- :param ref_obj_list: list of VirtualNetwork object
-
- """
- self.virtual_network_refs = ref_obj_list
- #end set_virtual_network_list
-
- def get_virtual_network_refs(self):
- """Return virtual-network list for virtual-machine-interface.
-
- :returns: list of <VirtualNetwork>
-
- """
- return getattr(self, 'virtual_network_refs', None)
- #end get_virtual_network_refs
-
- def set_routing_instance(self, ref_obj, ref_data):
- """Set routing-instance for virtual-machine-interface.
-
- :param ref_obj: RoutingInstance object
- :param ref_data: PolicyBasedForwardingRuleType object
-
- """
- self.routing_instance_refs = [{'to':ref_obj.get_fq_name(), 'attr':ref_data}]
- if ref_obj.uuid:
- self.routing_instance_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_routing_instance
-
- def add_routing_instance(self, ref_obj, ref_data):
- """Add routing-instance to virtual-machine-interface.
-
- :param ref_obj: RoutingInstance object
- :param ref_data: PolicyBasedForwardingRuleType object
-
- """
- refs = getattr(self, 'routing_instance_refs', [])
- if not refs:
- self.routing_instance_refs = []
-
- # if ref already exists, update any attr with it
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
-                # update the existing entry in place so the new attr and uuid are not dropped
-                ref['attr'] = ref_data
-                if ref_obj.uuid:
-                    ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name(), 'attr':ref_data}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.routing_instance_refs.append(ref_info)
- #end add_routing_instance
-
- def del_routing_instance(self, ref_obj):
- refs = self.get_routing_instance_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.routing_instance_refs.remove(ref)
- return
- #end del_routing_instance
-
- def set_routing_instance_list(self, ref_obj_list, ref_data_list):
- """Set routing-instance list for virtual-machine-interface.
-
- :param ref_obj_list: list of RoutingInstance object
- :param ref_data_list: list of PolicyBasedForwardingRuleType object
-
- """
- self.routing_instance_refs = [{'to':ref_obj_list[i], 'attr':ref_data_list[i]} for i in range(len(ref_obj_list))]
- #end set_routing_instance_list
-
- def get_routing_instance_refs(self):
- """Return routing-instance list for virtual-machine-interface.
-
- :returns: list of tuple <RoutingInstance, PolicyBasedForwardingRuleType>
-
- """
- return getattr(self, 'routing_instance_refs', None)
- #end get_routing_instance_refs
-
- def set_interface_route_table(self, ref_obj):
- """Set interface-route-table for virtual-machine-interface.
-
- :param ref_obj: InterfaceRouteTable object
-
- """
- self.interface_route_table_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.interface_route_table_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_interface_route_table
-
- def add_interface_route_table(self, ref_obj):
- """Add interface-route-table to virtual-machine-interface.
-
- :param ref_obj: InterfaceRouteTable object
-
- """
- refs = getattr(self, 'interface_route_table_refs', [])
- if not refs:
- self.interface_route_table_refs = []
-
- # if ref already exists, update any attr with it
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
-                # refresh the existing entry in place; rebinding 'ref' would not update the stored list
-                if ref_obj.uuid:
-                    ref['uuid'] = ref_obj.uuid
- return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.interface_route_table_refs.append(ref_info)
- #end add_interface_route_table
-
- def del_interface_route_table(self, ref_obj):
- refs = self.get_interface_route_table_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.interface_route_table_refs.remove(ref)
- return
- #end del_interface_route_table
-
- def set_interface_route_table_list(self, ref_obj_list):
- """Set interface-route-table list for virtual-machine-interface.
-
- :param ref_obj_list: list of InterfaceRouteTable object
-
- """
- self.interface_route_table_refs = ref_obj_list
- #end set_interface_route_table_list
-
- def get_interface_route_table_refs(self):
- """Return interface-route-table list for virtual-machine-interface.
-
- :returns: list of <InterfaceRouteTable>
-
- """
- return getattr(self, 'interface_route_table_refs', None)
- #end get_interface_route_table_refs
-
- def get_virtual_machine_interface_back_refs(self):
- """Return list of all virtual-machine-interfaces using this virtual-machine-interface"""
- return getattr(self, 'virtual_machine_interface_back_refs', None)
- #end get_virtual_machine_interface_back_refs
-
- def get_virtual_machine_back_refs(self):
- """Return list of all virtual-machines using this virtual-machine-interface"""
- return getattr(self, 'virtual_machine_back_refs', None)
- #end get_virtual_machine_back_refs
-
- def get_project_back_refs(self):
- """Return list of all projects using this virtual-machine-interface"""
- return getattr(self, 'project_back_refs', None)
- #end get_project_back_refs
-
- def get_instance_ip_back_refs(self):
- """Return list of all instance-ips using this virtual-machine-interface"""
- return getattr(self, 'instance_ip_back_refs', None)
- #end get_instance_ip_back_refs
-
- def get_subnet_back_refs(self):
- """Return list of all subnets using this virtual-machine-interface"""
- return getattr(self, 'subnet_back_refs', None)
- #end get_subnet_back_refs
-
- def get_floating_ip_back_refs(self):
- """Return list of all floating-ips using this virtual-machine-interface"""
- return getattr(self, 'floating_ip_back_refs', None)
- #end get_floating_ip_back_refs
-
- def get_logical_interface_back_refs(self):
- """Return list of all logical-interfaces using this virtual-machine-interface"""
- return getattr(self, 'logical_interface_back_refs', None)
- #end get_logical_interface_back_refs
-
- def get_customer_attachment_back_refs(self):
- """Return list of all customer-attachments using this virtual-machine-interface"""
- return getattr(self, 'customer_attachment_back_refs', None)
- #end get_customer_attachment_back_refs
-
- def get_logical_router_back_refs(self):
- """Return list of all logical-routers using this virtual-machine-interface"""
- return getattr(self, 'logical_router_back_refs', None)
- #end get_logical_router_back_refs
-
- def get_loadbalancer_pool_back_refs(self):
- """Return list of all loadbalancer-pools using this virtual-machine-interface"""
- return getattr(self, 'loadbalancer_pool_back_refs', None)
- #end get_loadbalancer_pool_back_refs
-
- def get_virtual_ip_back_refs(self):
- """Return list of all virtual-ips using this virtual-machine-interface"""
- return getattr(self, 'virtual_ip_back_refs', None)
- #end get_virtual_ip_back_refs
-
- def dump(self):
- """Display virtual-machine-interface object in compact form."""
- print '------------ virtual-machine-interface ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P virtual_machine_interface_mac_addresses = ', self.get_virtual_machine_interface_mac_addresses()
- print 'P virtual_machine_interface_dhcp_option_list = ', self.get_virtual_machine_interface_dhcp_option_list()
- print 'P virtual_machine_interface_host_routes = ', self.get_virtual_machine_interface_host_routes()
- print 'P virtual_machine_interface_allowed_address_pairs = ', self.get_virtual_machine_interface_allowed_address_pairs()
- print 'P vrf_assign_table = ', self.get_vrf_assign_table()
- print 'P virtual_machine_interface_device_owner = ', self.get_virtual_machine_interface_device_owner()
- print 'P virtual_machine_interface_properties = ', self.get_virtual_machine_interface_properties()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF qos_forwarding_class = ', self.get_qos_forwarding_class_refs()
- print 'REF security_group = ', self.get_security_group_refs()
- print 'REF virtual_machine_interface = ', self.get_virtual_machine_interface_refs()
- print 'REF virtual_machine = ', self.get_virtual_machine_refs()
- print 'REF virtual_network = ', self.get_virtual_network_refs()
- print 'REF routing_instance = ', self.get_routing_instance_refs()
- print 'REF interface_route_table = ', self.get_interface_route_table_refs()
- print 'BCK virtual_machine_interface = ', self.get_virtual_machine_interface_back_refs()
- print 'BCK instance_ip = ', self.get_instance_ip_back_refs()
- print 'BCK subnet = ', self.get_subnet_back_refs()
- print 'BCK floating_ip = ', self.get_floating_ip_back_refs()
- print 'BCK logical_interface = ', self.get_logical_interface_back_refs()
- print 'BCK customer_attachment = ', self.get_customer_attachment_back_refs()
- print 'BCK logical_router = ', self.get_logical_router_back_refs()
- print 'BCK loadbalancer_pool = ', self.get_loadbalancer_pool_back_refs()
- print 'BCK virtual_ip = ', self.get_virtual_ip_back_refs()
- #end dump
-
-#end class VirtualMachineInterface
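-
-# Minimal usage sketch for VirtualMachineInterface above. It assumes a
-# Project class with the usual fq_name interface is defined elsewhere in
-# this module; VirtualNetwork is defined further below. The helper name and
-# the 'compute:nova' owner string are illustrative only.
-def _example_virtual_machine_interface(project):
-    # parent_obj supplies parent_type and the fq_name prefix (see __init__)
-    vmi = VirtualMachineInterface('vmi-1', parent_obj=project)
-    vmi.set_display_name('vmi-1')
-    vmi.set_virtual_machine_interface_device_owner('compute:nova')
-    vn = VirtualNetwork('vn-1')
-    vmi.set_virtual_network(vn)   # stored as [{'to': vn.get_fq_name()}] (+ 'uuid' when set)
-    # uuid is always serialized; other fields only when listed in field_names
-    return vmi.serialize_to_json(['virtual_network_refs', 'display_name'])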
-
-class LoadbalancerHealthmonitor(object):
- """
-    Represents loadbalancer-healthmonitor configuration.
-
- Child of:
-        :class:`.Project` object
-
- Properties:
- * loadbalancer-healthmonitor-properties (:class:`.LoadbalancerHealthmonitorType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
-
- Referred by:
- * list of :class:`.LoadbalancerPool` objects
- """
-
- prop_fields = set([u'loadbalancer_healthmonitor_properties', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'project_back_refs', u'loadbalancer_pool_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, loadbalancer_healthmonitor_properties = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'loadbalancer-healthmonitor'
- if not name:
- name = u'default-loadbalancer-healthmonitor'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'project'
- self.fq_name = [u'default-domain', u'default-project']
- self.fq_name.append(name)
-
-
- # property fields
- if loadbalancer_healthmonitor_properties:
- self._loadbalancer_healthmonitor_properties = loadbalancer_healthmonitor_properties
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (loadbalancer-healthmonitor)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of loadbalancer-healthmonitor in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of loadbalancer-healthmonitor as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of loadbalancer-healthmonitor's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
-        """Return FQN of loadbalancer-healthmonitor's parent as colon delimited string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def loadbalancer_healthmonitor_properties(self):
- """Get loadbalancer-healthmonitor-properties for loadbalancer-healthmonitor.
-
- :returns: LoadbalancerHealthmonitorType object
-
- """
- return getattr(self, '_loadbalancer_healthmonitor_properties', None)
- #end loadbalancer_healthmonitor_properties
-
- @loadbalancer_healthmonitor_properties.setter
- def loadbalancer_healthmonitor_properties(self, loadbalancer_healthmonitor_properties):
- """Set loadbalancer-healthmonitor-properties for loadbalancer-healthmonitor.
-
- :param loadbalancer_healthmonitor_properties: LoadbalancerHealthmonitorType object
-
- """
- self._loadbalancer_healthmonitor_properties = loadbalancer_healthmonitor_properties
- #end loadbalancer_healthmonitor_properties
-
- def set_loadbalancer_healthmonitor_properties(self, value):
- self.loadbalancer_healthmonitor_properties = value
- #end set_loadbalancer_healthmonitor_properties
-
- def get_loadbalancer_healthmonitor_properties(self):
- return self.loadbalancer_healthmonitor_properties
- #end get_loadbalancer_healthmonitor_properties
-
- @property
- def id_perms(self):
- """Get id-perms for loadbalancer-healthmonitor.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for loadbalancer-healthmonitor.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for loadbalancer-healthmonitor.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for loadbalancer-healthmonitor.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_loadbalancer_healthmonitor_properties'):
- self._serialize_field_to_json(serialized, field_names, 'loadbalancer_healthmonitor_properties')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_project_back_refs(self):
- """Return list of all projects using this loadbalancer-healthmonitor"""
- return getattr(self, 'project_back_refs', None)
- #end get_project_back_refs
-
- def get_loadbalancer_pool_back_refs(self):
- """Return list of all loadbalancer-pools using this loadbalancer-healthmonitor"""
- return getattr(self, 'loadbalancer_pool_back_refs', None)
- #end get_loadbalancer_pool_back_refs
-
- def dump(self):
- """Display loadbalancer-healthmonitor object in compact form."""
- print '------------ loadbalancer-healthmonitor ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P loadbalancer_healthmonitor_properties = ', self.get_loadbalancer_healthmonitor_properties()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'BCK loadbalancer_pool = ', self.get_loadbalancer_pool_back_refs()
- #end dump
-
-#end class LoadbalancerHealthmonitor
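-
-# Minimal usage sketch for LoadbalancerHealthmonitor above; with no
-# parent_obj the constructor defaults the parent to
-# default-domain:default-project. The helper name is illustrative only.
-def _example_loadbalancer_healthmonitor():
-    hm = LoadbalancerHealthmonitor('hm-1')
-    hm.set_display_name('http-monitor')
-    # hm.get_fq_name() -> [u'default-domain', u'default-project', 'hm-1']
-    return hm.serialize_to_json()   # field_names=None serializes every set field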
-
-class VirtualNetwork(object):
- """
-    Represents virtual-network configuration.
-
- Child of:
-        :class:`.Project` object
-
- Properties:
- * virtual-network-properties (:class:`.VirtualNetworkType` type)
- * virtual-network-network-id (xsd:integer type)
- * route-target-list (:class:`.RouteTargetList` type)
- * router-external (xsd:boolean type)
- * is-shared (xsd:boolean type)
- * external-ipam (xsd:boolean type)
- * flood-unknown-unicast (xsd:boolean type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
- * list of :class:`.AccessControlList` objects
- * list of :class:`.FloatingIpPool` objects
- * list of :class:`.RoutingInstance` objects
-
- References to:
- * list of :class:`.QosForwardingClass` objects
- * list of (:class:`.NetworkIpam` object, :class:`.VnSubnetsType` attribute)
- * list of (:class:`.NetworkPolicy` object, :class:`.VirtualNetworkPolicyType` attribute)
- * list of :class:`.RouteTable` objects
-
- Referred by:
- * list of :class:`.VirtualMachineInterface` objects
- * list of :class:`.InstanceIp` objects
- * list of :class:`.PhysicalRouter` objects
- * list of :class:`.LogicalRouter` objects
- """
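-
-    # Usage sketch for the attributed refs on this class (see
-    # set_network_ipam / add_network_ipam below): unlike plain refs, each
-    # network-ipam entry also carries an 'attr' payload. NetworkIpam and
-    # VnSubnetsType are assumed to be defined in the accompanying modules;
-    # the snippet is illustrative only:
-    #
-    #   vn = VirtualNetwork('vn-1')
-    #   vn.add_network_ipam(ipam_obj, vn_subnets)
-    #   vn.get_network_ipam_refs()   # -> [{'to': <ipam fq_name>, 'attr': vn_subnets}]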
-
- prop_fields = set([u'virtual_network_properties', u'virtual_network_network_id', u'route_target_list', u'router_external', u'is_shared', u'external_ipam', u'flood_unknown_unicast', u'id_perms', u'display_name'])
- ref_fields = set([u'qos_forwarding_class_refs', u'network_ipam_refs', u'network_policy_refs', u'route_table_refs'])
- backref_fields = set([u'project_back_refs', 'virtual_machine_interface_back_refs', u'instance_ip_back_refs', u'physical_router_back_refs', u'logical_router_back_refs'])
- children_fields = set([u'access_control_lists', u'floating_ip_pools', 'routing_instances'])
-
- def __init__(self, name = None, parent_obj = None, virtual_network_properties = None, virtual_network_network_id = None, route_target_list = None, router_external = None, is_shared = None, external_ipam = None, flood_unknown_unicast = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'virtual-network'
- if not name:
- name = u'default-virtual-network'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'project'
- self.fq_name = [u'default-domain', u'default-project']
- self.fq_name.append(name)
-
-
- # property fields
- if virtual_network_properties:
- self._virtual_network_properties = virtual_network_properties
- if virtual_network_network_id:
- self._virtual_network_network_id = virtual_network_network_id
- if route_target_list:
- self._route_target_list = route_target_list
- if router_external:
- self._router_external = router_external
- if is_shared:
- self._is_shared = is_shared
- if external_ipam:
- self._external_ipam = external_ipam
- if flood_unknown_unicast:
- self._flood_unknown_unicast = flood_unknown_unicast
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (virtual-network)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of virtual-network in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of virtual-network as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of virtual-network's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
-        """Return FQN of virtual-network's parent as colon delimited string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def virtual_network_properties(self):
- """Get virtual-network-properties for virtual-network.
-
- :returns: VirtualNetworkType object
-
- """
- return getattr(self, '_virtual_network_properties', None)
- #end virtual_network_properties
-
- @virtual_network_properties.setter
- def virtual_network_properties(self, virtual_network_properties):
- """Set virtual-network-properties for virtual-network.
-
- :param virtual_network_properties: VirtualNetworkType object
-
- """
- self._virtual_network_properties = virtual_network_properties
- #end virtual_network_properties
-
- def set_virtual_network_properties(self, value):
- self.virtual_network_properties = value
- #end set_virtual_network_properties
-
- def get_virtual_network_properties(self):
- return self.virtual_network_properties
- #end get_virtual_network_properties
-
- @property
- def virtual_network_network_id(self):
- """Get virtual-network-network-id for virtual-network.
-
- :returns: xsd:integer object
-
- """
- return getattr(self, '_virtual_network_network_id', None)
- #end virtual_network_network_id
-
- @virtual_network_network_id.setter
- def virtual_network_network_id(self, virtual_network_network_id):
- """Set virtual-network-network-id for virtual-network.
-
- :param virtual_network_network_id: xsd:integer object
-
- """
- self._virtual_network_network_id = virtual_network_network_id
- #end virtual_network_network_id
-
- def set_virtual_network_network_id(self, value):
- self.virtual_network_network_id = value
- #end set_virtual_network_network_id
-
- def get_virtual_network_network_id(self):
- return self.virtual_network_network_id
- #end get_virtual_network_network_id
-
- @property
- def route_target_list(self):
- """Get route-target-list for virtual-network.
-
- :returns: RouteTargetList object
-
- """
- return getattr(self, '_route_target_list', None)
- #end route_target_list
-
- @route_target_list.setter
- def route_target_list(self, route_target_list):
- """Set route-target-list for virtual-network.
-
- :param route_target_list: RouteTargetList object
-
- """
- self._route_target_list = route_target_list
- #end route_target_list
-
- def set_route_target_list(self, value):
- self.route_target_list = value
- #end set_route_target_list
-
- def get_route_target_list(self):
- return self.route_target_list
- #end get_route_target_list
-
- @property
- def router_external(self):
- """Get router-external for virtual-network.
-
- :returns: xsd:boolean object
-
- """
- return getattr(self, '_router_external', None)
- #end router_external
-
- @router_external.setter
- def router_external(self, router_external):
- """Set router-external for virtual-network.
-
- :param router_external: xsd:boolean object
-
- """
- self._router_external = router_external
- #end router_external
-
- def set_router_external(self, value):
- self.router_external = value
- #end set_router_external
-
- def get_router_external(self):
- return self.router_external
- #end get_router_external
-
- @property
- def is_shared(self):
- """Get is-shared for virtual-network.
-
- :returns: xsd:boolean object
-
- """
- return getattr(self, '_is_shared', None)
- #end is_shared
-
- @is_shared.setter
- def is_shared(self, is_shared):
- """Set is-shared for virtual-network.
-
- :param is_shared: xsd:boolean object
-
- """
- self._is_shared = is_shared
- #end is_shared
-
- def set_is_shared(self, value):
- self.is_shared = value
- #end set_is_shared
-
- def get_is_shared(self):
- return self.is_shared
- #end get_is_shared
-
- @property
- def external_ipam(self):
- """Get external-ipam for virtual-network.
-
- :returns: xsd:boolean object
-
- """
- return getattr(self, '_external_ipam', None)
- #end external_ipam
-
- @external_ipam.setter
- def external_ipam(self, external_ipam):
- """Set external-ipam for virtual-network.
-
- :param external_ipam: xsd:boolean object
-
- """
- self._external_ipam = external_ipam
- #end external_ipam
-
- def set_external_ipam(self, value):
- self.external_ipam = value
- #end set_external_ipam
-
- def get_external_ipam(self):
- return self.external_ipam
- #end get_external_ipam
-
- @property
- def flood_unknown_unicast(self):
- """Get flood-unknown-unicast for virtual-network.
-
- :returns: xsd:boolean object
-
- """
- return getattr(self, '_flood_unknown_unicast', None)
- #end flood_unknown_unicast
-
- @flood_unknown_unicast.setter
- def flood_unknown_unicast(self, flood_unknown_unicast):
- """Set flood-unknown-unicast for virtual-network.
-
- :param flood_unknown_unicast: xsd:boolean object
-
- """
- self._flood_unknown_unicast = flood_unknown_unicast
- #end flood_unknown_unicast
-
- def set_flood_unknown_unicast(self, value):
- self.flood_unknown_unicast = value
- #end set_flood_unknown_unicast
-
- def get_flood_unknown_unicast(self):
- return self.flood_unknown_unicast
- #end get_flood_unknown_unicast
-
- @property
- def id_perms(self):
- """Get id-perms for virtual-network.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for virtual-network.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for virtual-network.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for virtual-network.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_virtual_network_properties'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_network_properties')
- if hasattr(self, '_virtual_network_network_id'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_network_network_id')
- if hasattr(self, '_route_target_list'):
- self._serialize_field_to_json(serialized, field_names, 'route_target_list')
- if hasattr(self, '_router_external'):
- self._serialize_field_to_json(serialized, field_names, 'router_external')
- if hasattr(self, '_is_shared'):
- self._serialize_field_to_json(serialized, field_names, 'is_shared')
- if hasattr(self, '_external_ipam'):
- self._serialize_field_to_json(serialized, field_names, 'external_ipam')
- if hasattr(self, '_flood_unknown_unicast'):
- self._serialize_field_to_json(serialized, field_names, 'flood_unknown_unicast')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'qos_forwarding_class_refs'):
- self._serialize_field_to_json(serialized, field_names, 'qos_forwarding_class_refs')
- if hasattr(self, 'network_ipam_refs'):
- self._serialize_field_to_json(serialized, field_names, 'network_ipam_refs')
- if hasattr(self, 'network_policy_refs'):
- self._serialize_field_to_json(serialized, field_names, 'network_policy_refs')
- if hasattr(self, 'route_table_refs'):
- self._serialize_field_to_json(serialized, field_names, 'route_table_refs')
- return serialized
- #end serialize_to_json
-
- def get_access_control_lists(self):
- return getattr(self, 'access_control_lists', None)
- #end get_access_control_lists
-
- def get_floating_ip_pools(self):
- return getattr(self, 'floating_ip_pools', None)
- #end get_floating_ip_pools
-
- def get_routing_instances(self):
- return getattr(self, 'routing_instances', None)
- #end get_routing_instances
-
- def set_qos_forwarding_class(self, ref_obj):
- """Set qos-forwarding-class for virtual-network.
-
- :param ref_obj: QosForwardingClass object
-
- """
- self.qos_forwarding_class_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.qos_forwarding_class_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_qos_forwarding_class
-
- def add_qos_forwarding_class(self, ref_obj):
- """Add qos-forwarding-class to virtual-network.
-
- :param ref_obj: QosForwardingClass object
-
- """
- refs = getattr(self, 'qos_forwarding_class_refs', [])
- if not refs:
- self.qos_forwarding_class_refs = []
-
-        # if ref already exists, refresh its uuid in place; rebinding the
-        # loop variable would not modify the stored entry
-        for ref in refs:
-            if ref['to'] == ref_obj.get_fq_name():
-                if ref_obj.uuid:
-                    ref['uuid'] = ref_obj.uuid
-                return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.qos_forwarding_class_refs.append(ref_info)
- #end add_qos_forwarding_class
-
- def del_qos_forwarding_class(self, ref_obj):
- refs = self.get_qos_forwarding_class_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.qos_forwarding_class_refs.remove(ref)
- return
- #end del_qos_forwarding_class
-
- def set_qos_forwarding_class_list(self, ref_obj_list):
- """Set qos-forwarding-class list for virtual-network.
-
- :param ref_obj_list: list of QosForwardingClass object
-
- """
- self.qos_forwarding_class_refs = ref_obj_list
- #end set_qos_forwarding_class_list
-
- def get_qos_forwarding_class_refs(self):
- """Return qos-forwarding-class list for virtual-network.
-
- :returns: list of <QosForwardingClass>
-
- """
- return getattr(self, 'qos_forwarding_class_refs', None)
- #end get_qos_forwarding_class_refs
-
- def set_network_ipam(self, ref_obj, ref_data):
- """Set network-ipam for virtual-network.
-
- :param ref_obj: NetworkIpam object
- :param ref_data: VnSubnetsType object
-
- """
- self.network_ipam_refs = [{'to':ref_obj.get_fq_name(), 'attr':ref_data}]
- if ref_obj.uuid:
- self.network_ipam_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_network_ipam
-
- def add_network_ipam(self, ref_obj, ref_data):
- """Add network-ipam to virtual-network.
-
- :param ref_obj: NetworkIpam object
- :param ref_data: VnSubnetsType object
-
- """
- refs = getattr(self, 'network_ipam_refs', [])
- if not refs:
- self.network_ipam_refs = []
-
-        # if ref already exists, update its attr and uuid in place; rebinding
-        # the loop variable would not modify the stored entry
-        for ref in refs:
-            if ref['to'] == ref_obj.get_fq_name():
-                ref['attr'] = ref_data
-                if ref_obj.uuid:
-                    ref['uuid'] = ref_obj.uuid
-                return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name(), 'attr':ref_data}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.network_ipam_refs.append(ref_info)
- #end add_network_ipam
-
- def del_network_ipam(self, ref_obj):
- refs = self.get_network_ipam_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.network_ipam_refs.remove(ref)
- return
- #end del_network_ipam
-
- def set_network_ipam_list(self, ref_obj_list, ref_data_list):
- """Set network-ipam list for virtual-network.
-
- :param ref_obj_list: list of NetworkIpam object
- :param ref_data_list: list of VnSubnetsType object
-
- """
- self.network_ipam_refs = [{'to':ref_obj_list[i], 'attr':ref_data_list[i]} for i in range(len(ref_obj_list))]
- #end set_network_ipam_list
-
- def get_network_ipam_refs(self):
- """Return network-ipam list for virtual-network.
-
- :returns: list of tuple <NetworkIpam, VnSubnetsType>
-
- """
- return getattr(self, 'network_ipam_refs', None)
- #end get_network_ipam_refs
-
- def set_network_policy(self, ref_obj, ref_data):
- """Set network-policy for virtual-network.
-
- :param ref_obj: NetworkPolicy object
- :param ref_data: VirtualNetworkPolicyType object
-
- """
- self.network_policy_refs = [{'to':ref_obj.get_fq_name(), 'attr':ref_data}]
- if ref_obj.uuid:
- self.network_policy_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_network_policy
-
- def add_network_policy(self, ref_obj, ref_data):
- """Add network-policy to virtual-network.
-
- :param ref_obj: NetworkPolicy object
- :param ref_data: VirtualNetworkPolicyType object
-
- """
- refs = getattr(self, 'network_policy_refs', [])
- if not refs:
- self.network_policy_refs = []
-
-        # if ref already exists, update its attr and uuid in place; rebinding
-        # the loop variable would not modify the stored entry
-        for ref in refs:
-            if ref['to'] == ref_obj.get_fq_name():
-                ref['attr'] = ref_data
-                if ref_obj.uuid:
-                    ref['uuid'] = ref_obj.uuid
-                return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name(), 'attr':ref_data}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.network_policy_refs.append(ref_info)
- #end add_network_policy
-
- def del_network_policy(self, ref_obj):
- refs = self.get_network_policy_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.network_policy_refs.remove(ref)
- return
- #end del_network_policy
-
- def set_network_policy_list(self, ref_obj_list, ref_data_list):
- """Set network-policy list for virtual-network.
-
- :param ref_obj_list: list of NetworkPolicy object
- :param ref_data_list: list of VirtualNetworkPolicyType object
-
- """
- self.network_policy_refs = [{'to':ref_obj_list[i], 'attr':ref_data_list[i]} for i in range(len(ref_obj_list))]
- #end set_network_policy_list
-
- def get_network_policy_refs(self):
- """Return network-policy list for virtual-network.
-
- :returns: list of tuple <NetworkPolicy, VirtualNetworkPolicyType>
-
- """
- return getattr(self, 'network_policy_refs', None)
- #end get_network_policy_refs
-
- def set_route_table(self, ref_obj):
- """Set route-table for virtual-network.
-
- :param ref_obj: RouteTable object
-
- """
- self.route_table_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.route_table_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_route_table
-
- def add_route_table(self, ref_obj):
- """Add route-table to virtual-network.
-
- :param ref_obj: RouteTable object
-
- """
- refs = getattr(self, 'route_table_refs', [])
- if not refs:
- self.route_table_refs = []
-
-        # if ref already exists, refresh its uuid in place; rebinding the
-        # loop variable would not modify the stored entry
-        for ref in refs:
-            if ref['to'] == ref_obj.get_fq_name():
-                if ref_obj.uuid:
-                    ref['uuid'] = ref_obj.uuid
-                return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.route_table_refs.append(ref_info)
- #end add_route_table
-
- def del_route_table(self, ref_obj):
- refs = self.get_route_table_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.route_table_refs.remove(ref)
- return
- #end del_route_table
-
- def set_route_table_list(self, ref_obj_list):
- """Set route-table list for virtual-network.
-
- :param ref_obj_list: list of RouteTable object
-
- """
- self.route_table_refs = ref_obj_list
- #end set_route_table_list
-
- def get_route_table_refs(self):
- """Return route-table list for virtual-network.
-
- :returns: list of <RouteTable>
-
- """
- return getattr(self, 'route_table_refs', None)
- #end get_route_table_refs
-
- def get_project_back_refs(self):
- """Return list of all projects using this virtual-network"""
- return getattr(self, 'project_back_refs', None)
- #end get_project_back_refs
-
- def get_virtual_machine_interface_back_refs(self):
- """Return list of all virtual-machine-interfaces using this virtual-network"""
- return getattr(self, 'virtual_machine_interface_back_refs', None)
- #end get_virtual_machine_interface_back_refs
-
- def get_instance_ip_back_refs(self):
- """Return list of all instance-ips using this virtual-network"""
- return getattr(self, 'instance_ip_back_refs', None)
- #end get_instance_ip_back_refs
-
- def get_physical_router_back_refs(self):
- """Return list of all physical-routers using this virtual-network"""
- return getattr(self, 'physical_router_back_refs', None)
- #end get_physical_router_back_refs
-
- def get_logical_router_back_refs(self):
- """Return list of all logical-routers using this virtual-network"""
- return getattr(self, 'logical_router_back_refs', None)
- #end get_logical_router_back_refs
-
- def dump(self):
- """Display virtual-network object in compact form."""
- print '------------ virtual-network ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P virtual_network_properties = ', self.get_virtual_network_properties()
- print 'P virtual_network_network_id = ', self.get_virtual_network_network_id()
- print 'P route_target_list = ', self.get_route_target_list()
- print 'P router_external = ', self.get_router_external()
- print 'P is_shared = ', self.get_is_shared()
- print 'P external_ipam = ', self.get_external_ipam()
- print 'P flood_unknown_unicast = ', self.get_flood_unknown_unicast()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF qos_forwarding_class = ', self.get_qos_forwarding_class_refs()
- print 'REF network_ipam = ', self.get_network_ipam_refs()
- print 'REF network_policy = ', self.get_network_policy_refs()
- print 'HAS access_control_list = ', self.get_access_control_lists()
- print 'HAS floating_ip_pool = ', self.get_floating_ip_pools()
- print 'HAS routing_instance = ', self.get_routing_instances()
- print 'REF route_table = ', self.get_route_table_refs()
- print 'BCK virtual_machine_interface = ', self.get_virtual_machine_interface_back_refs()
- print 'BCK instance_ip = ', self.get_instance_ip_back_refs()
- print 'BCK physical_router = ', self.get_physical_router_back_refs()
- print 'BCK logical_router = ', self.get_logical_router_back_refs()
- #end dump
-
-#end class VirtualNetwork
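-
-# Hypothetical usage sketch, not part of the generated API: it only illustrates
-# the reference-list pattern used by the methods above.  It assumes
-# VirtualNetwork's constructor follows the same (name, parent_obj, ...) pattern
-# as the other classes in this module; NetworkIpam is defined further below, and
-# None stands in for the VnSubnetsType attribute a real caller would pass.
-def _example_virtual_network_refs():
-    vn = VirtualNetwork('demo-vn')
-    vn.set_display_name('Demo VN')
-    ipam = NetworkIpam('demo-ipam')
-    vn.add_network_ipam(ipam, None)   # stores {'to': <fq_name>, 'attr': None}
-    return vn.serialize_to_json(['fq_name', 'display_name', 'network_ipam_refs'])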
-
-class Project(object):
- """
-    Represents project configuration.
-
-    Child of:
-    :class:`.Domain` object
-
- Properties:
- * quota (:class:`.QuotaType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
- * list of :class:`.SecurityGroup` objects
- * list of :class:`.VirtualNetwork` objects
- * list of :class:`.QosQueue` objects
- * list of :class:`.QosForwardingClass` objects
- * list of :class:`.NetworkIpam` objects
- * list of :class:`.NetworkPolicy` objects
- * list of :class:`.VirtualMachineInterface` objects
- * list of :class:`.ServiceInstance` objects
- * list of :class:`.RouteTable` objects
- * list of :class:`.InterfaceRouteTable` objects
- * list of :class:`.LogicalRouter` objects
- * list of :class:`.LoadbalancerPool` objects
- * list of :class:`.LoadbalancerHealthmonitor` objects
- * list of :class:`.VirtualIp` objects
-
- References to:
- * list of (:class:`.Namespace` object, :class:`.SubnetType` attribute)
- * list of :class:`.FloatingIpPool` objects
-
- Referred by:
- * list of :class:`.FloatingIp` objects
- """
-
- prop_fields = set([u'quota', u'id_perms', u'display_name'])
- ref_fields = set([u'namespace_refs', u'floating_ip_pool_refs'])
- backref_fields = set([u'domain_back_refs', u'floating_ip_back_refs'])
- children_fields = set([u'security_groups', u'virtual_networks', u'qos_queues', u'qos_forwarding_classs', u'network_ipams', u'network_policys', 'virtual_machine_interfaces', u'service_instances', u'route_tables', u'interface_route_tables', u'logical_routers', u'loadbalancer_pools', u'loadbalancer_healthmonitors', u'virtual_ips'])
-
- def __init__(self, name = None, parent_obj = None, quota = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'project'
- if not name:
- name = u'default-project'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'domain'
- self.fq_name = [u'default-domain']
- self.fq_name.append(name)
-
-
- # property fields
- if quota:
- self._quota = quota
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (project)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of project in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of project as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of project's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of project's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def quota(self):
- """Get quota for project.
-
- :returns: QuotaType object
-
- """
- return getattr(self, '_quota', None)
- #end quota
-
- @quota.setter
- def quota(self, quota):
- """Set quota for project.
-
- :param quota: QuotaType object
-
- """
- self._quota = quota
- #end quota
-
- def set_quota(self, value):
- self.quota = value
- #end set_quota
-
- def get_quota(self):
- return self.quota
- #end get_quota
-
- @property
- def id_perms(self):
- """Get id-perms for project.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for project.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for project.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for project.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_quota'):
- self._serialize_field_to_json(serialized, field_names, 'quota')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'namespace_refs'):
- self._serialize_field_to_json(serialized, field_names, 'namespace_refs')
- if hasattr(self, 'floating_ip_pool_refs'):
- self._serialize_field_to_json(serialized, field_names, 'floating_ip_pool_refs')
- return serialized
- #end serialize_to_json
-
- def get_security_groups(self):
- return getattr(self, 'security_groups', None)
- #end get_security_groups
-
- def get_virtual_networks(self):
- return getattr(self, 'virtual_networks', None)
- #end get_virtual_networks
-
- def get_qos_queues(self):
- return getattr(self, 'qos_queues', None)
- #end get_qos_queues
-
- def get_qos_forwarding_classs(self):
- return getattr(self, 'qos_forwarding_classs', None)
- #end get_qos_forwarding_classs
-
- def get_network_ipams(self):
- return getattr(self, 'network_ipams', None)
- #end get_network_ipams
-
- def get_network_policys(self):
- return getattr(self, 'network_policys', None)
- #end get_network_policys
-
- def get_virtual_machine_interfaces(self):
- return getattr(self, 'virtual_machine_interfaces', None)
- #end get_virtual_machine_interfaces
-
- def get_service_instances(self):
- return getattr(self, 'service_instances', None)
- #end get_service_instances
-
- def get_route_tables(self):
- return getattr(self, 'route_tables', None)
- #end get_route_tables
-
- def get_interface_route_tables(self):
- return getattr(self, 'interface_route_tables', None)
- #end get_interface_route_tables
-
- def get_logical_routers(self):
- return getattr(self, 'logical_routers', None)
- #end get_logical_routers
-
- def get_loadbalancer_pools(self):
- return getattr(self, 'loadbalancer_pools', None)
- #end get_loadbalancer_pools
-
- def get_loadbalancer_healthmonitors(self):
- return getattr(self, 'loadbalancer_healthmonitors', None)
- #end get_loadbalancer_healthmonitors
-
- def get_virtual_ips(self):
- return getattr(self, 'virtual_ips', None)
- #end get_virtual_ips
-
- def set_namespace(self, ref_obj, ref_data):
- """Set namespace for project.
-
- :param ref_obj: Namespace object
- :param ref_data: SubnetType object
-
- """
- self.namespace_refs = [{'to':ref_obj.get_fq_name(), 'attr':ref_data}]
- if ref_obj.uuid:
- self.namespace_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_namespace
-
- def add_namespace(self, ref_obj, ref_data):
- """Add namespace to project.
-
- :param ref_obj: Namespace object
- :param ref_data: SubnetType object
-
- """
- refs = getattr(self, 'namespace_refs', [])
- if not refs:
- self.namespace_refs = []
-
-        # if ref already exists, update its attr and uuid in place; rebinding
-        # the loop variable would not modify the stored entry
-        for ref in refs:
-            if ref['to'] == ref_obj.get_fq_name():
-                ref['attr'] = ref_data
-                if ref_obj.uuid:
-                    ref['uuid'] = ref_obj.uuid
-                return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name(), 'attr':ref_data}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.namespace_refs.append(ref_info)
- #end add_namespace
-
- def del_namespace(self, ref_obj):
- refs = self.get_namespace_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.namespace_refs.remove(ref)
- return
- #end del_namespace
-
- def set_namespace_list(self, ref_obj_list, ref_data_list):
- """Set namespace list for project.
-
- :param ref_obj_list: list of Namespace object
- :param ref_data_list: list of SubnetType object
-
- """
- self.namespace_refs = [{'to':ref_obj_list[i], 'attr':ref_data_list[i]} for i in range(len(ref_obj_list))]
- #end set_namespace_list
-
- def get_namespace_refs(self):
- """Return namespace list for project.
-
- :returns: list of tuple <Namespace, SubnetType>
-
- """
- return getattr(self, 'namespace_refs', None)
- #end get_namespace_refs
-
- def set_floating_ip_pool(self, ref_obj):
- """Set floating-ip-pool for project.
-
- :param ref_obj: FloatingIpPool object
-
- """
- self.floating_ip_pool_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.floating_ip_pool_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_floating_ip_pool
-
- def add_floating_ip_pool(self, ref_obj):
- """Add floating-ip-pool to project.
-
- :param ref_obj: FloatingIpPool object
-
- """
- refs = getattr(self, 'floating_ip_pool_refs', [])
- if not refs:
- self.floating_ip_pool_refs = []
-
-        # if ref already exists, refresh its uuid in place; rebinding the
-        # loop variable would not modify the stored entry
-        for ref in refs:
-            if ref['to'] == ref_obj.get_fq_name():
-                if ref_obj.uuid:
-                    ref['uuid'] = ref_obj.uuid
-                return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.floating_ip_pool_refs.append(ref_info)
- #end add_floating_ip_pool
-
- def del_floating_ip_pool(self, ref_obj):
- refs = self.get_floating_ip_pool_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.floating_ip_pool_refs.remove(ref)
- return
- #end del_floating_ip_pool
-
- def set_floating_ip_pool_list(self, ref_obj_list):
- """Set floating-ip-pool list for project.
-
- :param ref_obj_list: list of FloatingIpPool object
-
- """
- self.floating_ip_pool_refs = ref_obj_list
- #end set_floating_ip_pool_list
-
- def get_floating_ip_pool_refs(self):
- """Return floating-ip-pool list for project.
-
- :returns: list of <FloatingIpPool>
-
- """
- return getattr(self, 'floating_ip_pool_refs', None)
- #end get_floating_ip_pool_refs
-
- def get_domain_back_refs(self):
- """Return list of all domains using this project"""
- return getattr(self, 'domain_back_refs', None)
- #end get_domain_back_refs
-
- def get_floating_ip_back_refs(self):
- """Return list of all floating-ips using this project"""
- return getattr(self, 'floating_ip_back_refs', None)
- #end get_floating_ip_back_refs
-
- def dump(self):
- """Display project object in compact form."""
- print '------------ project ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P quota = ', self.get_quota()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF namespace = ', self.get_namespace_refs()
- print 'HAS security_group = ', self.get_security_groups()
- print 'HAS virtual_network = ', self.get_virtual_networks()
- print 'HAS qos_queue = ', self.get_qos_queues()
- print 'HAS qos_forwarding_class = ', self.get_qos_forwarding_classs()
- print 'HAS network_ipam = ', self.get_network_ipams()
- print 'HAS network_policy = ', self.get_network_policys()
- print 'HAS virtual_machine_interface = ', self.get_virtual_machine_interfaces()
- print 'REF floating_ip_pool = ', self.get_floating_ip_pool_refs()
- print 'HAS service_instance = ', self.get_service_instances()
- print 'HAS route_table = ', self.get_route_tables()
- print 'HAS interface_route_table = ', self.get_interface_route_tables()
- print 'HAS logical_router = ', self.get_logical_routers()
- print 'HAS loadbalancer_pool = ', self.get_loadbalancer_pools()
- print 'HAS loadbalancer_healthmonitor = ', self.get_loadbalancer_healthmonitors()
- print 'HAS virtual_ip = ', self.get_virtual_ips()
- print 'BCK floating_ip = ', self.get_floating_ip_back_refs()
- #end dump
-
-#end class Project
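-
-# Hypothetical usage sketch, not part of the generated API: shows that the
-# constructor above parents an unqualified Project under default-domain, and
-# that serialize_to_json() with no field list emits every populated field.
-# The 'demo-project' name is made up for illustration.
-def _example_project_usage():
-    proj = Project('demo-project')
-    proj.set_display_name('Demo Project')
-    assert proj.get_fq_name_str() == 'default-domain:demo-project'
-    return proj.serialize_to_json()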
-
-class QosForwardingClass(object):
- """
-    Represents qos-forwarding-class configuration.
-
-    Child of:
-    :class:`.Project` object
-
- Properties:
- * dscp (xsd:integer type)
- * trusted (xsd:boolean type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
- * list of :class:`.QosQueue` objects
-
- Referred by:
- * list of :class:`.VirtualNetwork` objects
- * list of :class:`.VirtualMachineInterface` objects
- """
-
- prop_fields = set([u'dscp', u'trusted', u'id_perms', u'display_name'])
- ref_fields = set([u'qos_queue_refs'])
- backref_fields = set([u'project_back_refs', u'virtual_network_back_refs', 'virtual_machine_interface_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, dscp = None, trusted = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'qos-forwarding-class'
- if not name:
- name = u'default-qos-forwarding-class'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'project'
- self.fq_name = [u'default-domain', u'default-project']
- self.fq_name.append(name)
-
-
- # property fields
- if dscp:
- self._dscp = dscp
- if trusted:
- self._trusted = trusted
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (qos-forwarding-class)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of qos-forwarding-class in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of qos-forwarding-class as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of qos-forwarding-class's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of qos-forwarding-class's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def dscp(self):
- """Get dscp for qos-forwarding-class.
-
- :returns: xsd:integer object
-
- """
- return getattr(self, '_dscp', None)
- #end dscp
-
- @dscp.setter
- def dscp(self, dscp):
- """Set dscp for qos-forwarding-class.
-
- :param dscp: xsd:integer object
-
- """
- self._dscp = dscp
- #end dscp
-
- def set_dscp(self, value):
- self.dscp = value
- #end set_dscp
-
- def get_dscp(self):
- return self.dscp
- #end get_dscp
-
- @property
- def trusted(self):
- """Get trusted for qos-forwarding-class.
-
- :returns: xsd:boolean object
-
- """
- return getattr(self, '_trusted', None)
- #end trusted
-
- @trusted.setter
- def trusted(self, trusted):
- """Set trusted for qos-forwarding-class.
-
- :param trusted: xsd:boolean object
-
- """
- self._trusted = trusted
- #end trusted
-
- def set_trusted(self, value):
- self.trusted = value
- #end set_trusted
-
- def get_trusted(self):
- return self.trusted
- #end get_trusted
-
- @property
- def id_perms(self):
- """Get id-perms for qos-forwarding-class.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for qos-forwarding-class.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for qos-forwarding-class.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for qos-forwarding-class.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_dscp'):
- self._serialize_field_to_json(serialized, field_names, 'dscp')
- if hasattr(self, '_trusted'):
- self._serialize_field_to_json(serialized, field_names, 'trusted')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'qos_queue_refs'):
- self._serialize_field_to_json(serialized, field_names, 'qos_queue_refs')
- return serialized
- #end serialize_to_json
-
- def set_qos_queue(self, ref_obj):
- """Set qos-queue for qos-forwarding-class.
-
- :param ref_obj: QosQueue object
-
- """
- self.qos_queue_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.qos_queue_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_qos_queue
-
- def add_qos_queue(self, ref_obj):
- """Add qos-queue to qos-forwarding-class.
-
- :param ref_obj: QosQueue object
-
- """
- refs = getattr(self, 'qos_queue_refs', [])
- if not refs:
- self.qos_queue_refs = []
-
-        # if ref already exists, refresh its uuid in place; rebinding the
-        # loop variable would not modify the stored entry
-        for ref in refs:
-            if ref['to'] == ref_obj.get_fq_name():
-                if ref_obj.uuid:
-                    ref['uuid'] = ref_obj.uuid
-                return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.qos_queue_refs.append(ref_info)
- #end add_qos_queue
-
- def del_qos_queue(self, ref_obj):
- refs = self.get_qos_queue_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.qos_queue_refs.remove(ref)
- return
- #end del_qos_queue
-
- def set_qos_queue_list(self, ref_obj_list):
- """Set qos-queue list for qos-forwarding-class.
-
- :param ref_obj_list: list of QosQueue object
-
- """
- self.qos_queue_refs = ref_obj_list
- #end set_qos_queue_list
-
- def get_qos_queue_refs(self):
- """Return qos-queue list for qos-forwarding-class.
-
- :returns: list of <QosQueue>
-
- """
- return getattr(self, 'qos_queue_refs', None)
- #end get_qos_queue_refs
-
- def get_project_back_refs(self):
- """Return list of all projects using this qos-forwarding-class"""
- return getattr(self, 'project_back_refs', None)
- #end get_project_back_refs
-
- def get_virtual_network_back_refs(self):
- """Return list of all virtual-networks using this qos-forwarding-class"""
- return getattr(self, 'virtual_network_back_refs', None)
- #end get_virtual_network_back_refs
-
- def get_virtual_machine_interface_back_refs(self):
- """Return list of all virtual-machine-interfaces using this qos-forwarding-class"""
- return getattr(self, 'virtual_machine_interface_back_refs', None)
- #end get_virtual_machine_interface_back_refs
-
- def dump(self):
- """Display qos-forwarding-class object in compact form."""
- print '------------ qos-forwarding-class ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P dscp = ', self.get_dscp()
- print 'P trusted = ', self.get_trusted()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF qos_queue = ', self.get_qos_queue_refs()
- print 'BCK virtual_network = ', self.get_virtual_network_back_refs()
- print 'BCK virtual_machine_interface = ', self.get_virtual_machine_interface_back_refs()
- #end dump
-
-#end class QosForwardingClass
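-
-# Hypothetical usage sketch, not part of the generated API: exercises the
-# scalar properties defined above.  The dscp value 46 is only an example, and
-# qos-queue references are omitted because QosQueue is defined elsewhere.
-def _example_qos_forwarding_class_usage():
-    fc = QosForwardingClass('demo-fc')
-    fc.set_dscp(46)
-    fc.set_trusted(True)
-    return fc.serialize_to_json(['fq_name', 'dscp', 'trusted'])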
-
-class DatabaseNode(object):
- """
-    Represents database-node configuration.
-
-    Child of:
-    :class:`.GlobalSystemConfig` object
-
- Properties:
- * database-node-ip-address (IpAddressType type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
-
- Referred by:
- """
-
- prop_fields = set([u'database_node_ip_address', u'id_perms', u'display_name'])
- ref_fields = set([])
- backref_fields = set([u'global_system_config_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, database_node_ip_address = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'database-node'
- if not name:
- name = u'default-database-node'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'global-system-config'
- self.fq_name = [u'default-global-system-config']
- self.fq_name.append(name)
-
-
- # property fields
- if database_node_ip_address:
- self._database_node_ip_address = database_node_ip_address
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (database-node)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of database-node in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of database-node as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of database-node's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of database-node's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def database_node_ip_address(self):
- """Get database-node-ip-address for database-node.
-
- :returns: IpAddressType object
-
- """
- return getattr(self, '_database_node_ip_address', None)
- #end database_node_ip_address
-
- @database_node_ip_address.setter
- def database_node_ip_address(self, database_node_ip_address):
- """Set database-node-ip-address for database-node.
-
- :param database_node_ip_address: IpAddressType object
-
- """
- self._database_node_ip_address = database_node_ip_address
- #end database_node_ip_address
-
- def set_database_node_ip_address(self, value):
- self.database_node_ip_address = value
- #end set_database_node_ip_address
-
- def get_database_node_ip_address(self):
- return self.database_node_ip_address
- #end get_database_node_ip_address
-
- @property
- def id_perms(self):
- """Get id-perms for database-node.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for database-node.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for database-node.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for database-node.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_database_node_ip_address'):
- self._serialize_field_to_json(serialized, field_names, 'database_node_ip_address')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- return serialized
- #end serialize_to_json
-
- def get_global_system_config_back_refs(self):
- """Return list of all global-system-configs using this database-node"""
- return getattr(self, 'global_system_config_back_refs', None)
- #end get_global_system_config_back_refs
-
- def dump(self):
- """Display database-node object in compact form."""
- print '------------ database-node ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P database_node_ip_address = ', self.get_database_node_ip_address()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- #end dump
-
-#end class DatabaseNode
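-
-# Hypothetical usage sketch, not part of the generated API: the address
-# property is typed IpAddressType in the schema; a plain string is used here
-# only to illustrate the setter/serializer round trip.
-def _example_database_node_usage():
-    dbn = DatabaseNode('demo-db-node')
-    dbn.set_database_node_ip_address('10.0.0.5')
-    return dbn.serialize_to_json(['fq_name', 'database_node_ip_address'])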
-
-class RoutingInstance(object):
- """
-    Represents routing-instance configuration.
-
-    Child of:
-    :class:`.VirtualNetwork` object
-
- Properties:
- * service-chain-information (:class:`.ServiceChainInfo` type)
- * routing-instance-is-default (xsd:boolean type)
- * static-route-entries (:class:`.StaticRouteEntriesType` type)
- * default-ce-protocol (:class:`.DefaultProtocolType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
- * list of :class:`.BgpRouter` objects
-
- References to:
- * list of (:class:`.RoutingInstance` object, :class:`.ConnectionType` attribute)
- * list of (:class:`.RouteTarget` object, :class:`.InstanceTargetType` attribute)
-
- Referred by:
- * list of :class:`.VirtualMachineInterface` objects
- * list of :class:`.RoutingInstance` objects
- """
-
- prop_fields = set([u'service_chain_information', u'routing_instance_is_default', u'static_route_entries', u'default_ce_protocol', u'id_perms', u'display_name'])
- ref_fields = set(['routing_instance_refs', 'route_target_refs'])
- backref_fields = set(['virtual_machine_interface_back_refs', u'virtual_network_back_refs', 'routing_instance_back_refs', 'customer_attachment_back_refs'])
- children_fields = set(['bgp_routers'])
-
- def __init__(self, name = None, parent_obj = None, service_chain_information = None, routing_instance_is_default = None, static_route_entries = None, default_ce_protocol = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'routing-instance'
- if not name:
- name = u'default-routing-instance'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'virtual-network'
- self.fq_name = [u'default-domain', u'default-project', u'default-virtual-network']
- self.fq_name.append(name)
-
-
- # property fields
- if service_chain_information:
- self._service_chain_information = service_chain_information
- if routing_instance_is_default:
- self._routing_instance_is_default = routing_instance_is_default
- if static_route_entries:
- self._static_route_entries = static_route_entries
- if default_ce_protocol:
- self._default_ce_protocol = default_ce_protocol
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (routing-instance)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of routing-instance in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of routing-instance as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of routing-instance's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
- """Return FQN of routing-instance's parent as colon delimted string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def service_chain_information(self):
- """Get service-chain-information for routing-instance.
-
- :returns: ServiceChainInfo object
-
- """
- return getattr(self, '_service_chain_information', None)
- #end service_chain_information
-
- @service_chain_information.setter
- def service_chain_information(self, service_chain_information):
- """Set service-chain-information for routing-instance.
-
- :param service_chain_information: ServiceChainInfo object
-
- """
- self._service_chain_information = service_chain_information
- #end service_chain_information
-
- def set_service_chain_information(self, value):
- self.service_chain_information = value
- #end set_service_chain_information
-
- def get_service_chain_information(self):
- return self.service_chain_information
- #end get_service_chain_information
-
- @property
- def routing_instance_is_default(self):
- """Get routing-instance-is-default for routing-instance.
-
- :returns: xsd:boolean object
-
- """
- return getattr(self, '_routing_instance_is_default', None)
- #end routing_instance_is_default
-
- @routing_instance_is_default.setter
- def routing_instance_is_default(self, routing_instance_is_default):
- """Set routing-instance-is-default for routing-instance.
-
- :param routing_instance_is_default: xsd:boolean object
-
- """
- self._routing_instance_is_default = routing_instance_is_default
- #end routing_instance_is_default
-
- def set_routing_instance_is_default(self, value):
- self.routing_instance_is_default = value
- #end set_routing_instance_is_default
-
- def get_routing_instance_is_default(self):
- return self.routing_instance_is_default
- #end get_routing_instance_is_default
-
- @property
- def static_route_entries(self):
- """Get static-route-entries for routing-instance.
-
- :returns: StaticRouteEntriesType object
-
- """
- return getattr(self, '_static_route_entries', None)
- #end static_route_entries
-
- @static_route_entries.setter
- def static_route_entries(self, static_route_entries):
- """Set static-route-entries for routing-instance.
-
- :param static_route_entries: StaticRouteEntriesType object
-
- """
- self._static_route_entries = static_route_entries
- #end static_route_entries
-
- def set_static_route_entries(self, value):
- self.static_route_entries = value
- #end set_static_route_entries
-
- def get_static_route_entries(self):
- return self.static_route_entries
- #end get_static_route_entries
-
- @property
- def default_ce_protocol(self):
- """Get default-ce-protocol for routing-instance.
-
- :returns: DefaultProtocolType object
-
- """
- return getattr(self, '_default_ce_protocol', None)
- #end default_ce_protocol
-
- @default_ce_protocol.setter
- def default_ce_protocol(self, default_ce_protocol):
- """Set default-ce-protocol for routing-instance.
-
- :param default_ce_protocol: DefaultProtocolType object
-
- """
- self._default_ce_protocol = default_ce_protocol
- #end default_ce_protocol
-
- def set_default_ce_protocol(self, value):
- self.default_ce_protocol = value
- #end set_default_ce_protocol
-
- def get_default_ce_protocol(self):
- return self.default_ce_protocol
- #end get_default_ce_protocol
-
- @property
- def id_perms(self):
- """Get id-perms for routing-instance.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for routing-instance.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for routing-instance.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for routing-instance.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_service_chain_information'):
- self._serialize_field_to_json(serialized, field_names, 'service_chain_information')
- if hasattr(self, '_routing_instance_is_default'):
- self._serialize_field_to_json(serialized, field_names, 'routing_instance_is_default')
- if hasattr(self, '_static_route_entries'):
- self._serialize_field_to_json(serialized, field_names, 'static_route_entries')
- if hasattr(self, '_default_ce_protocol'):
- self._serialize_field_to_json(serialized, field_names, 'default_ce_protocol')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'routing_instance_refs'):
- self._serialize_field_to_json(serialized, field_names, 'routing_instance_refs')
- if hasattr(self, 'route_target_refs'):
- self._serialize_field_to_json(serialized, field_names, 'route_target_refs')
- return serialized
- #end serialize_to_json
-
- def get_bgp_routers(self):
- return getattr(self, 'bgp_routers', None)
- #end get_bgp_routers
-
- def set_routing_instance(self, ref_obj, ref_data):
- """Set routing-instance for routing-instance.
-
- :param ref_obj: RoutingInstance object
- :param ref_data: ConnectionType object
-
- """
- self.routing_instance_refs = [{'to':ref_obj.get_fq_name(), 'attr':ref_data}]
- if ref_obj.uuid:
- self.routing_instance_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_routing_instance
-
- def add_routing_instance(self, ref_obj, ref_data):
- """Add routing-instance to routing-instance.
-
- :param ref_obj: RoutingInstance object
- :param ref_data: ConnectionType object
-
- """
- refs = getattr(self, 'routing_instance_refs', [])
- if not refs:
- self.routing_instance_refs = []
-
-        # if ref already exists, update its attr and uuid in place; rebinding
-        # the loop variable would not modify the stored entry
-        for ref in refs:
-            if ref['to'] == ref_obj.get_fq_name():
-                ref['attr'] = ref_data
-                if ref_obj.uuid:
-                    ref['uuid'] = ref_obj.uuid
-                return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name(), 'attr':ref_data}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.routing_instance_refs.append(ref_info)
- #end add_routing_instance
-
- def del_routing_instance(self, ref_obj):
- refs = self.get_routing_instance_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.routing_instance_refs.remove(ref)
- return
- #end del_routing_instance
-
- def set_routing_instance_list(self, ref_obj_list, ref_data_list):
- """Set routing-instance list for routing-instance.
-
- :param ref_obj_list: list of RoutingInstance object
- :param ref_data_list: list of ConnectionType object
-
- """
- self.routing_instance_refs = [{'to':ref_obj_list[i], 'attr':ref_data_list[i]} for i in range(len(ref_obj_list))]
- #end set_routing_instance_list
-
- def get_routing_instance_refs(self):
- """Return routing-instance list for routing-instance.
-
- :returns: list of tuple <RoutingInstance, ConnectionType>
-
- """
- return getattr(self, 'routing_instance_refs', None)
- #end get_routing_instance_refs
-
- def set_route_target(self, ref_obj, ref_data):
- """Set route-target for routing-instance.
-
- :param ref_obj: RouteTarget object
- :param ref_data: InstanceTargetType object
-
- """
- self.route_target_refs = [{'to':ref_obj.get_fq_name(), 'attr':ref_data}]
- if ref_obj.uuid:
- self.route_target_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_route_target
-
- def add_route_target(self, ref_obj, ref_data):
- """Add route-target to routing-instance.
-
- :param ref_obj: RouteTarget object
- :param ref_data: InstanceTargetType object
-
- """
- refs = getattr(self, 'route_target_refs', [])
- if not refs:
- self.route_target_refs = []
-
-        # if ref already exists, update its attr and uuid in place; rebinding
-        # the loop variable would not modify the stored entry
-        for ref in refs:
-            if ref['to'] == ref_obj.get_fq_name():
-                ref['attr'] = ref_data
-                if ref_obj.uuid:
-                    ref['uuid'] = ref_obj.uuid
-                return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name(), 'attr':ref_data}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.route_target_refs.append(ref_info)
- #end add_route_target
-
- def del_route_target(self, ref_obj):
- refs = self.get_route_target_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.route_target_refs.remove(ref)
- return
- #end del_route_target
-
- def set_route_target_list(self, ref_obj_list, ref_data_list):
- """Set route-target list for routing-instance.
-
- :param ref_obj_list: list of RouteTarget object
- :param ref_data_list: list of InstanceTargetType object
-
- """
- self.route_target_refs = [{'to':ref_obj_list[i], 'attr':ref_data_list[i]} for i in range(len(ref_obj_list))]
- #end set_route_target_list
-
- def get_route_target_refs(self):
- """Return route-target list for routing-instance.
-
- :returns: list of tuple <RouteTarget, InstanceTargetType>
-
- """
- return getattr(self, 'route_target_refs', None)
- #end get_route_target_refs
-
- def get_virtual_machine_interface_back_refs(self):
- """Return list of all virtual-machine-interfaces using this routing-instance"""
- return getattr(self, 'virtual_machine_interface_back_refs', None)
- #end get_virtual_machine_interface_back_refs
-
- def get_virtual_network_back_refs(self):
- """Return list of all virtual-networks using this routing-instance"""
- return getattr(self, 'virtual_network_back_refs', None)
- #end get_virtual_network_back_refs
-
- def get_routing_instance_back_refs(self):
- """Return list of all routing-instances using this routing-instance"""
- return getattr(self, 'routing_instance_back_refs', None)
- #end get_routing_instance_back_refs
-
- def get_customer_attachment_back_refs(self):
- """Return list of all customer-attachments using this routing-instance"""
- return getattr(self, 'customer_attachment_back_refs', None)
- #end get_customer_attachment_back_refs
-
- def dump(self):
- """Display routing-instance object in compact form."""
- print '------------ routing-instance ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P service_chain_information = ', self.get_service_chain_information()
- print 'P routing_instance_is_default = ', self.get_routing_instance_is_default()
- print 'P static_route_entries = ', self.get_static_route_entries()
- print 'P default_ce_protocol = ', self.get_default_ce_protocol()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'HAS bgp_router = ', self.get_bgp_routers()
- print 'REF routing_instance = ', self.get_routing_instance_refs()
- print 'REF route_target = ', self.get_route_target_refs()
- print 'BCK virtual_machine_interface = ', self.get_virtual_machine_interface_back_refs()
- print 'BCK routing_instance = ', self.get_routing_instance_back_refs()
- #end dump
-
-#end class RoutingInstance
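-
-# Hypothetical usage sketch, not part of the generated API: only the
-# routing-instance self-reference defined above is exercised; None stands in
-# for the ConnectionType attribute a real caller would pass, and RouteTarget
-# references are omitted because that class is defined elsewhere.
-def _example_routing_instance_usage():
-    ri = RoutingInstance('demo-ri')
-    ri.set_routing_instance_is_default(True)
-    peer = RoutingInstance('demo-peer-ri')
-    ri.add_routing_instance(peer, None)   # stores {'to': <fq_name>, 'attr': None}
-    return ri.serialize_to_json(['fq_name', 'routing_instance_is_default', 'routing_instance_refs'])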
-
-class NetworkIpam(object):
- """
-    Represents network-ipam configuration.
-
-    Child of:
-    :class:`.Project` object
-
- Properties:
- * network-ipam-mgmt (:class:`.IpamType` type)
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
- * list of :class:`.VirtualDns` objects
-
- Referred by:
- * list of :class:`.VirtualNetwork` objects
- """
-
- prop_fields = set([u'network_ipam_mgmt', u'id_perms', u'display_name'])
- ref_fields = set([u'virtual_DNS_refs'])
- backref_fields = set([u'project_back_refs', u'virtual_network_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, network_ipam_mgmt = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'network-ipam'
- if not name:
- name = u'default-network-ipam'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'project'
- self.fq_name = [u'default-domain', u'default-project']
- self.fq_name.append(name)
-
-
- # property fields
- if network_ipam_mgmt:
- self._network_ipam_mgmt = network_ipam_mgmt
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (network-ipam)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of network-ipam in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of network-ipam as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of network-ipam's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
-        """Return FQN of network-ipam's parent as colon delimited string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def network_ipam_mgmt(self):
- """Get network-ipam-mgmt for network-ipam.
-
- :returns: IpamType object
-
- """
- return getattr(self, '_network_ipam_mgmt', None)
- #end network_ipam_mgmt
-
- @network_ipam_mgmt.setter
- def network_ipam_mgmt(self, network_ipam_mgmt):
- """Set network-ipam-mgmt for network-ipam.
-
- :param network_ipam_mgmt: IpamType object
-
- """
- self._network_ipam_mgmt = network_ipam_mgmt
- #end network_ipam_mgmt
-
- def set_network_ipam_mgmt(self, value):
- self.network_ipam_mgmt = value
- #end set_network_ipam_mgmt
-
- def get_network_ipam_mgmt(self):
- return self.network_ipam_mgmt
- #end get_network_ipam_mgmt
-
- @property
- def id_perms(self):
- """Get id-perms for network-ipam.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for network-ipam.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for network-ipam.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for network-ipam.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_network_ipam_mgmt'):
- self._serialize_field_to_json(serialized, field_names, 'network_ipam_mgmt')
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'virtual_DNS_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_DNS_refs')
- return serialized
- #end serialize_to_json
-
- def set_virtual_DNS(self, ref_obj):
- """Set virtual-DNS for network-ipam.
-
- :param ref_obj: VirtualDns object
-
- """
- self.virtual_DNS_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_DNS_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_DNS
-
- def add_virtual_DNS(self, ref_obj):
- """Add virtual-DNS to network-ipam.
-
- :param ref_obj: VirtualDns object
-
- """
- refs = getattr(self, 'virtual_DNS_refs', [])
- if not refs:
- self.virtual_DNS_refs = []
-
-        # if ref already exists, refresh it in place (rebinding the loop
-        # variable alone would not update the stored entry)
-        for idx, ref in enumerate(refs):
-            if ref['to'] == ref_obj.get_fq_name():
-                new_ref = {'to':ref_obj.get_fq_name()}
-                if ref_obj.uuid:
-                    new_ref['uuid'] = ref_obj.uuid
-                refs[idx] = new_ref
-                return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_DNS_refs.append(ref_info)
- #end add_virtual_DNS
-
- def del_virtual_DNS(self, ref_obj):
- refs = self.get_virtual_DNS_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_DNS_refs.remove(ref)
- return
- #end del_virtual_DNS
-
- def set_virtual_DNS_list(self, ref_obj_list):
- """Set virtual-DNS list for network-ipam.
-
- :param ref_obj_list: list of VirtualDns object
-
- """
- self.virtual_DNS_refs = ref_obj_list
- #end set_virtual_DNS_list
-
- def get_virtual_DNS_refs(self):
- """Return virtual-DNS list for network-ipam.
-
- :returns: list of <VirtualDns>
-
- """
- return getattr(self, 'virtual_DNS_refs', None)
- #end get_virtual_DNS_refs
-
- def get_project_back_refs(self):
- """Return list of all projects using this network-ipam"""
- return getattr(self, 'project_back_refs', None)
- #end get_project_back_refs
-
- def get_virtual_network_back_refs(self):
- """Return list of all virtual-networks using this network-ipam"""
- return getattr(self, 'virtual_network_back_refs', None)
- #end get_virtual_network_back_refs
-
- def dump(self):
- """Display network-ipam object in compact form."""
- print '------------ network-ipam ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P network_ipam_mgmt = ', self.get_network_ipam_mgmt()
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF virtual_DNS = ', self.get_virtual_DNS_refs()
- print 'BCK virtual_network = ', self.get_virtual_network_back_refs()
- #end dump
-
-#end class NetworkIpam
-
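Another editorial sketch (hypothetical names, not part of the deleted source) showing the NetworkIpam accessors above; Project and VirtualDns are assumed to be the classes generated earlier in this module:

    # Create an ipam under a project and point it at a virtual DNS.
    proj = Project(name='demo-project')
    ipam = NetworkIpam(name='demo-ipam', parent_obj=proj)
    vdns = VirtualDns(name='demo-dns')
    ipam.add_virtual_DNS(vdns)     # stores {'to': vdns.get_fq_name()} (+ 'uuid' when set)
    ipam.serialize_to_json(['fq_name', 'virtual_DNS_refs'])
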
-class LogicalRouter(object):
- """
-    Represents logical-router configuration.
-
- Child of:
- :class:`.Project` object OR
-
- Properties:
- * id-perms (:class:`.IdPermsType` type)
- * display-name (xsd:string type)
-
- Children:
-
- References to:
- * list of :class:`.VirtualMachineInterface` objects
- * list of :class:`.RouteTarget` objects
- * list of :class:`.VirtualNetwork` objects
- * list of :class:`.ServiceInstance` objects
-
- Referred by:
- """
-
- prop_fields = set([u'id_perms', u'display_name'])
- ref_fields = set(['virtual_machine_interface_refs', 'route_target_refs', u'virtual_network_refs', u'service_instance_refs'])
- backref_fields = set([u'project_back_refs'])
- children_fields = set([])
-
- def __init__(self, name = None, parent_obj = None, id_perms = None, display_name = None, *args, **kwargs):
- # type-independent fields
- self._type = 'logical-router'
- if not name:
- name = u'default-logical-router'
- self.name = name
- self._uuid = None
- # Determine parent type and fq_name
- kwargs_parent_type = kwargs.get('parent_type', None)
- kwargs_fq_name = kwargs.get('fq_name', None)
- if parent_obj:
- self.parent_type = parent_obj._type
- # copy parent's fq_name
- self.fq_name = list(parent_obj.fq_name)
- self.fq_name.append(name)
- elif kwargs_parent_type and kwargs_fq_name:
- self.parent_type = kwargs_parent_type
- self.fq_name = kwargs_fq_name
- else: # No parent obj specified
- self.parent_type = 'project'
- self.fq_name = [u'default-domain', u'default-project']
- self.fq_name.append(name)
-
-
- # property fields
- if id_perms:
- self._id_perms = id_perms
- if display_name:
- self._display_name = display_name
- #end __init__
-
- def get_type(self):
- """Return object type (logical-router)."""
- return self._type
- #end get_type
-
- def get_fq_name(self):
- """Return FQN of logical-router in list form."""
- return self.fq_name
- #end get_fq_name
-
- def get_fq_name_str(self):
- """Return FQN of logical-router as colon delimited string."""
- return ':'.join(self.fq_name)
- #end get_fq_name_str
-
- @property
- def parent_name(self):
- return self.fq_name[:-1][-1]
- #end parent_name
-
- def get_parent_fq_name(self):
- """Return FQN of logical-router's parent in list form."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return self.fq_name[:-1]
- #end get_parent_fq_name
-
- def get_parent_fq_name_str(self):
-        """Return FQN of logical-router's parent as colon delimited string."""
- if not hasattr(self, 'parent_type'):
- # child of config-root
- return None
-
- return ':'.join(self.fq_name[:-1])
- #end get_parent_fq_name_str
-
- @property
- def uuid(self):
- return getattr(self, '_uuid', None)
- #end uuid
-
- @uuid.setter
- def uuid(self, uuid_val):
- self._uuid = uuid_val
- #end uuid
-
- def set_uuid(self, uuid_val):
- self.uuid = uuid_val
- #end set_uuid
-
- def get_uuid(self):
- return self.uuid
- #end get_uuid
-
- @property
- def id_perms(self):
- """Get id-perms for logical-router.
-
- :returns: IdPermsType object
-
- """
- return getattr(self, '_id_perms', None)
- #end id_perms
-
- @id_perms.setter
- def id_perms(self, id_perms):
- """Set id-perms for logical-router.
-
- :param id_perms: IdPermsType object
-
- """
- self._id_perms = id_perms
- #end id_perms
-
- def set_id_perms(self, value):
- self.id_perms = value
- #end set_id_perms
-
- def get_id_perms(self):
- return self.id_perms
- #end get_id_perms
-
- @property
- def display_name(self):
- """Get display-name for logical-router.
-
- :returns: xsd:string object
-
- """
- return getattr(self, '_display_name', None)
- #end display_name
-
- @display_name.setter
- def display_name(self, display_name):
- """Set display-name for logical-router.
-
- :param display_name: xsd:string object
-
- """
- self._display_name = display_name
- #end display_name
-
- def set_display_name(self, value):
- self.display_name = value
- #end set_display_name
-
- def get_display_name(self):
- return self.display_name
- #end get_display_name
-
- def _serialize_field_to_json(self, serialized, fields_to_serialize, field_name):
- if fields_to_serialize is None: # all fields are serialized
- serialized[field_name] = getattr(self, field_name)
- elif field_name in fields_to_serialize:
- serialized[field_name] = getattr(self, field_name)
- #end _serialize_field_to_json
-
- def serialize_to_json(self, field_names = None):
- serialized = {}
-
- # serialize common fields
- self._serialize_field_to_json(serialized, ['uuid'], 'uuid')
- self._serialize_field_to_json(serialized, field_names, 'fq_name')
- if hasattr(self, 'parent_type'):
- self._serialize_field_to_json(serialized, field_names, 'parent_type')
-
- # serialize property fields
- if hasattr(self, '_id_perms'):
- self._serialize_field_to_json(serialized, field_names, 'id_perms')
- if hasattr(self, '_display_name'):
- self._serialize_field_to_json(serialized, field_names, 'display_name')
-
- # serialize reference fields
- if hasattr(self, 'virtual_machine_interface_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_machine_interface_refs')
- if hasattr(self, 'route_target_refs'):
- self._serialize_field_to_json(serialized, field_names, 'route_target_refs')
- if hasattr(self, 'virtual_network_refs'):
- self._serialize_field_to_json(serialized, field_names, 'virtual_network_refs')
- if hasattr(self, 'service_instance_refs'):
- self._serialize_field_to_json(serialized, field_names, 'service_instance_refs')
- return serialized
- #end serialize_to_json
-
- def set_virtual_machine_interface(self, ref_obj):
- """Set virtual-machine-interface for logical-router.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_machine_interface_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_machine_interface
-
- def add_virtual_machine_interface(self, ref_obj):
- """Add virtual-machine-interface to logical-router.
-
- :param ref_obj: VirtualMachineInterface object
-
- """
- refs = getattr(self, 'virtual_machine_interface_refs', [])
- if not refs:
- self.virtual_machine_interface_refs = []
-
-        # if ref already exists, refresh it in place (rebinding the loop
-        # variable alone would not update the stored entry)
-        for idx, ref in enumerate(refs):
-            if ref['to'] == ref_obj.get_fq_name():
-                new_ref = {'to':ref_obj.get_fq_name()}
-                if ref_obj.uuid:
-                    new_ref['uuid'] = ref_obj.uuid
-                refs[idx] = new_ref
-                return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_machine_interface_refs.append(ref_info)
- #end add_virtual_machine_interface
-
- def del_virtual_machine_interface(self, ref_obj):
- refs = self.get_virtual_machine_interface_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_machine_interface_refs.remove(ref)
- return
- #end del_virtual_machine_interface
-
- def set_virtual_machine_interface_list(self, ref_obj_list):
- """Set virtual-machine-interface list for logical-router.
-
- :param ref_obj_list: list of VirtualMachineInterface object
-
- """
- self.virtual_machine_interface_refs = ref_obj_list
- #end set_virtual_machine_interface_list
-
- def get_virtual_machine_interface_refs(self):
- """Return virtual-machine-interface list for logical-router.
-
- :returns: list of <VirtualMachineInterface>
-
- """
- return getattr(self, 'virtual_machine_interface_refs', None)
- #end get_virtual_machine_interface_refs
-
- def set_route_target(self, ref_obj):
- """Set route-target for logical-router.
-
- :param ref_obj: RouteTarget object
-
- """
- self.route_target_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.route_target_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_route_target
-
- def add_route_target(self, ref_obj):
- """Add route-target to logical-router.
-
- :param ref_obj: RouteTarget object
-
- """
- refs = getattr(self, 'route_target_refs', [])
- if not refs:
- self.route_target_refs = []
-
-        # if ref already exists, refresh it in place (rebinding the loop
-        # variable alone would not update the stored entry)
-        for idx, ref in enumerate(refs):
-            if ref['to'] == ref_obj.get_fq_name():
-                new_ref = {'to':ref_obj.get_fq_name()}
-                if ref_obj.uuid:
-                    new_ref['uuid'] = ref_obj.uuid
-                refs[idx] = new_ref
-                return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.route_target_refs.append(ref_info)
- #end add_route_target
-
- def del_route_target(self, ref_obj):
- refs = self.get_route_target_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.route_target_refs.remove(ref)
- return
- #end del_route_target
-
- def set_route_target_list(self, ref_obj_list):
- """Set route-target list for logical-router.
-
- :param ref_obj_list: list of RouteTarget object
-
- """
- self.route_target_refs = ref_obj_list
- #end set_route_target_list
-
- def get_route_target_refs(self):
- """Return route-target list for logical-router.
-
- :returns: list of <RouteTarget>
-
- """
- return getattr(self, 'route_target_refs', None)
- #end get_route_target_refs
-
- def set_virtual_network(self, ref_obj):
- """Set virtual-network for logical-router.
-
- :param ref_obj: VirtualNetwork object
-
- """
- self.virtual_network_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.virtual_network_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_virtual_network
-
- def add_virtual_network(self, ref_obj):
- """Add virtual-network to logical-router.
-
- :param ref_obj: VirtualNetwork object
-
- """
- refs = getattr(self, 'virtual_network_refs', [])
- if not refs:
- self.virtual_network_refs = []
-
-        # if ref already exists, refresh it in place (rebinding the loop
-        # variable alone would not update the stored entry)
-        for idx, ref in enumerate(refs):
-            if ref['to'] == ref_obj.get_fq_name():
-                new_ref = {'to':ref_obj.get_fq_name()}
-                if ref_obj.uuid:
-                    new_ref['uuid'] = ref_obj.uuid
-                refs[idx] = new_ref
-                return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.virtual_network_refs.append(ref_info)
- #end add_virtual_network
-
- def del_virtual_network(self, ref_obj):
- refs = self.get_virtual_network_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.virtual_network_refs.remove(ref)
- return
- #end del_virtual_network
-
- def set_virtual_network_list(self, ref_obj_list):
- """Set virtual-network list for logical-router.
-
- :param ref_obj_list: list of VirtualNetwork object
-
- """
- self.virtual_network_refs = ref_obj_list
- #end set_virtual_network_list
-
- def get_virtual_network_refs(self):
- """Return virtual-network list for logical-router.
-
- :returns: list of <VirtualNetwork>
-
- """
- return getattr(self, 'virtual_network_refs', None)
- #end get_virtual_network_refs
-
- def set_service_instance(self, ref_obj):
- """Set service-instance for logical-router.
-
- :param ref_obj: ServiceInstance object
-
- """
- self.service_instance_refs = [{'to':ref_obj.get_fq_name()}]
- if ref_obj.uuid:
- self.service_instance_refs[0]['uuid'] = ref_obj.uuid
-
- #end set_service_instance
-
- def add_service_instance(self, ref_obj):
- """Add service-instance to logical-router.
-
- :param ref_obj: ServiceInstance object
-
- """
- refs = getattr(self, 'service_instance_refs', [])
- if not refs:
- self.service_instance_refs = []
-
-        # if ref already exists, refresh it in place (rebinding the loop
-        # variable alone would not update the stored entry)
-        for idx, ref in enumerate(refs):
-            if ref['to'] == ref_obj.get_fq_name():
-                new_ref = {'to':ref_obj.get_fq_name()}
-                if ref_obj.uuid:
-                    new_ref['uuid'] = ref_obj.uuid
-                refs[idx] = new_ref
-                return
-
- # ref didn't exist before
- ref_info = {'to':ref_obj.get_fq_name()}
- if ref_obj.uuid:
- ref_info['uuid'] = ref_obj.uuid
-
- self.service_instance_refs.append(ref_info)
- #end add_service_instance
-
- def del_service_instance(self, ref_obj):
- refs = self.get_service_instance_refs()
- if not refs:
- return
-
- for ref in refs:
- if ref['to'] == ref_obj.get_fq_name():
- self.service_instance_refs.remove(ref)
- return
- #end del_service_instance
-
- def set_service_instance_list(self, ref_obj_list):
- """Set service-instance list for logical-router.
-
- :param ref_obj_list: list of ServiceInstance object
-
- """
- self.service_instance_refs = ref_obj_list
- #end set_service_instance_list
-
- def get_service_instance_refs(self):
- """Return service-instance list for logical-router.
-
- :returns: list of <ServiceInstance>
-
- """
- return getattr(self, 'service_instance_refs', None)
- #end get_service_instance_refs
-
- def get_project_back_refs(self):
- """Return list of all projects using this logical-router"""
- return getattr(self, 'project_back_refs', None)
- #end get_project_back_refs
-
- def dump(self):
- """Display logical-router object in compact form."""
- print '------------ logical-router ------------'
- print 'Name = ', self.get_fq_name()
- print 'Uuid = ', self.uuid
- if hasattr(self, 'parent_type'): # non config-root children
- print 'Parent Type = ', self.parent_type
- print 'P id_perms = ', self.get_id_perms()
- print 'P display_name = ', self.get_display_name()
- print 'REF virtual_machine_interface = ', self.get_virtual_machine_interface_refs()
- print 'REF route_target = ', self.get_route_target_refs()
- print 'REF virtual_network = ', self.get_virtual_network_refs()
- print 'REF service_instance = ', self.get_service_instance_refs()
- #end dump
-
-#end class LogicalRouter
-
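A similar hedged sketch for the LogicalRouter reference helpers above (illustrative names; VirtualMachineInterface and VirtualNetwork come from the same generated module):

    # Hypothetical wiring of a logical router to an interface and a network.
    lr = LogicalRouter(name='demo-router')
    vmi = VirtualMachineInterface(name='demo-vmi')
    vn = VirtualNetwork(name='demo-net')
    lr.add_virtual_machine_interface(vmi)
    lr.add_virtual_network(vn)
    lr.del_virtual_network(vn)     # drops the matching {'to': ...} entry again
    lr.get_virtual_machine_interface_refs()
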
diff --git a/Testcases/vnc_api/gen/resource_common.pyc b/Testcases/vnc_api/gen/resource_common.pyc
deleted file mode 100644
index 31036f4..0000000
--- a/Testcases/vnc_api/gen/resource_common.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/gen/resource_server.py b/Testcases/vnc_api/gen/resource_server.py
deleted file mode 100644
index 239c6f4..0000000
--- a/Testcases/vnc_api/gen/resource_server.py
+++ /dev/null
@@ -1,2161 +0,0 @@
-
-# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit!
-
-from resource_common import *
-
-class DomainServerGen(Domain):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class DomainServerGen
-
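Every *ServerGen class in this deleted file follows the same shape as DomainServerGen above: each http_* hook returns a (success, error-message) tuple of (True, ''), and each dbe_*_notification hook is a no-op, so requests pass through unless a subclass overrides a hook. A hedged sketch of such an override (the DomainServer name and the validation rule are hypothetical):

    # Hypothetical subclass; anything not overridden keeps the permissive defaults.
    class DomainServer(DomainServerGen):
        @classmethod
        def http_put(cls, id, fq_name, obj, db_conn):
            # illustrative rule only: refuse updates to the default domain
            if fq_name and fq_name[-1] == 'default-domain':
                return False, 'default-domain may not be modified'
            return True, ''
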
-class GlobalVrouterConfigServerGen(GlobalVrouterConfig):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class GlobalVrouterConfigServerGen
-
-class InstanceIpServerGen(InstanceIp):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class InstanceIpServerGen
-
-class NetworkPolicyServerGen(NetworkPolicy):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class NetworkPolicyServerGen
-
-class LoadbalancerPoolServerGen(LoadbalancerPool):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class LoadbalancerPoolServerGen
-
-class VirtualDnsRecordServerGen(VirtualDnsRecord):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class VirtualDnsRecordServerGen
-
-class RouteTargetServerGen(RouteTarget):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class RouteTargetServerGen
-
-class FloatingIpServerGen(FloatingIp):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class FloatingIpServerGen
-
-class FloatingIpPoolServerGen(FloatingIpPool):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class FloatingIpPoolServerGen
-
-class PhysicalRouterServerGen(PhysicalRouter):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class PhysicalRouterServerGen
-
-class BgpRouterServerGen(BgpRouter):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class BgpRouterServerGen
-
-class VirtualRouterServerGen(VirtualRouter):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class VirtualRouterServerGen
-
-class ConfigRootServerGen(ConfigRoot):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class ConfigRootServerGen
-
-class SubnetServerGen(Subnet):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class SubnetServerGen
-
-class GlobalSystemConfigServerGen(GlobalSystemConfig):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class GlobalSystemConfigServerGen
-
-class ServiceApplianceServerGen(ServiceAppliance):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class ServiceApplianceServerGen
-
-class ServiceInstanceServerGen(ServiceInstance):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class ServiceInstanceServerGen
-
-class NamespaceServerGen(Namespace):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class NamespaceServerGen
-
-class LogicalInterfaceServerGen(LogicalInterface):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class LogicalInterfaceServerGen
-
-class RouteTableServerGen(RouteTable):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class RouteTableServerGen
-
-class PhysicalInterfaceServerGen(PhysicalInterface):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class PhysicalInterfaceServerGen
-
-class AccessControlListServerGen(AccessControlList):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class AccessControlListServerGen
-
-class AnalyticsNodeServerGen(AnalyticsNode):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class AnalyticsNodeServerGen
-
-class VirtualDnsServerGen(VirtualDns):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class VirtualDnsServerGen
-
-class CustomerAttachmentServerGen(CustomerAttachment):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class CustomerAttachmentServerGen
-
-class ServiceApplianceSetServerGen(ServiceApplianceSet):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class ServiceApplianceSetServerGen
-
-class ConfigNodeServerGen(ConfigNode):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class ConfigNodeServerGen
-
-class QosQueueServerGen(QosQueue):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class QosQueueServerGen
-
-class VirtualMachineServerGen(VirtualMachine):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class VirtualMachineServerGen
-
-class InterfaceRouteTableServerGen(InterfaceRouteTable):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class InterfaceRouteTableServerGen
-
-class ServiceTemplateServerGen(ServiceTemplate):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class ServiceTemplateServerGen
-
-class VirtualIpServerGen(VirtualIp):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class VirtualIpServerGen
-
-class LoadbalancerMemberServerGen(LoadbalancerMember):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class LoadbalancerMemberServerGen
-
-class SecurityGroupServerGen(SecurityGroup):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class SecurityGroupServerGen
-
-class ProviderAttachmentServerGen(ProviderAttachment):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class ProviderAttachmentServerGen
-
-class VirtualMachineInterfaceServerGen(VirtualMachineInterface):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class VirtualMachineInterfaceServerGen
-
-class LoadbalancerHealthmonitorServerGen(LoadbalancerHealthmonitor):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class LoadbalancerHealthmonitorServerGen
-
-class VirtualNetworkServerGen(VirtualNetwork):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class VirtualNetworkServerGen
-
-class ProjectServerGen(Project):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class ProjectServerGen
-
-class QosForwardingClassServerGen(QosForwardingClass):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class QosForwardingClassServerGen
-
-class DatabaseNodeServerGen(DatabaseNode):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class DatabaseNodeServerGen
-
-class RoutingInstanceServerGen(RoutingInstance):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class RoutingInstanceServerGen
-
-class NetworkIpamServerGen(NetworkIpam):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class NetworkIpamServerGen
-
-class LogicalRouterServerGen(LogicalRouter):
- generate_default_instance = True
-
- def __init__(self):
- pass
- #end __init__
-
- @classmethod
- def http_get(cls, id):
- return True, ''
- #end http_get
-
- @classmethod
- def http_put(cls, id, fq_name, obj, db_conn):
- return True, ''
- #end http_put
-
- @classmethod
- def http_post(cls, tenant_name, obj):
- return True, ''
- #end http_post
-
- @classmethod
- def http_delete(cls, id, obj, db_conn):
- return True, ''
- #end http_delete
-
- @classmethod
- def http_post_collection(cls, tenant_name, obj, db_conn):
- return True, ''
- #end http_post
-
- @classmethod
- def dbe_create_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_create_notification
-
- @classmethod
- def dbe_update_notification(cls, obj_ids):
- pass
- #end dbe_update_notification
-
- @classmethod
- def dbe_delete_notification(cls, obj_ids, obj_dict):
- pass
- #end dbe_delete_notification
-
-#end class LogicalRouterServerGen
-
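
The deleted resource_server.py above is generator scaffolding: every *ServerGen class exposes the same http_get/http_put/http_post/http_delete/http_post_collection hooks plus the dbe_*_notification callbacks, and each stub accepts every request by returning (True, ''). A minimal sketch of how such a hook could be specialised follows; the subclass name, the treatment of obj as a plain dict, and the (False, <message>) rejection form are assumptions inferred from the stub shape, not part of the deleted sources.

# Sketch only -- assumes VirtualNetworkServerGen (defined in the hunk above)
# is importable, and that obj arrives as a plain dict.
class VirtualNetworkValidatingServerGen(VirtualNetworkServerGen):

    @classmethod
    def http_put(cls, id, fq_name, obj, db_conn):
        # Reject an update that would blank out the display name;
        # everything else falls through to the permissive default.
        if obj.get('display_name') == '':
            return False, 'display_name must not be empty'
        return True, ''
    #end http_put

#end class VirtualNetworkValidatingServerGen
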
diff --git a/Testcases/vnc_api/gen/resource_server.pyc b/Testcases/vnc_api/gen/resource_server.pyc
deleted file mode 100644
index 1e99a6e..0000000
--- a/Testcases/vnc_api/gen/resource_server.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/gen/resource_test.py b/Testcases/vnc_api/gen/resource_test.py
deleted file mode 100644
index 032586a..0000000
--- a/Testcases/vnc_api/gen/resource_test.py
+++ /dev/null
@@ -1,5173 +0,0 @@
-'''
-This module defines the fixture classes for all config elements
-'''
-
-# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit!
-
-import cfixture
-from vnc_api import vnc_api
-from cfgm_common.exceptions import *
-
-from generatedssuper import GeneratedsSuper
-
-class DomainTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.Domain`
- """
- def __init__(self, conn_drv, domain_name=None, parent_fixt=None, auto_prop_val=False, domain_limits=None, api_access_list=None, id_perms=None, display_name=None):
- '''
- Create DomainTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- domain_name (str): Name of domain
- parent_fixt (:class:`.ConfigRootTestFixtureGen`): Parent fixture
- domain_limits (instance): instance of :class:`DomainLimitsType`
- api_access_list (instance): instance of :class:`ApiAccessListType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(DomainTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not domain_name:
- self._name = 'default-domain'
- else:
- self._name = domain_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.domain_limits = domain_limits
- self.api_access_list = api_access_list
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_domain_limits(self.domain_limits or vnc_api.gen.resource_xsd.DomainLimitsType.populate())
- self._obj.set_api_access_list(self.api_access_list or vnc_api.gen.resource_xsd.ApiAccessListType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(DomainTestFixtureGen, self).setUp()
- # child of config-root
- self._obj = vnc_api.Domain(self._name)
- try:
- self._obj = self._conn_drv.domain_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.domain_limits = self.domain_limits
- self._obj.api_access_list = self.api_access_list
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.domain_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.domain_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_projects() or self._obj.get_namespaces() or self._obj.get_service_templates() or self._obj.get_virtual_DNSs():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_domains():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.domains.remove(child_obj)
- break
-
- self._conn_drv.domain_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class DomainTestFixtureGen
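
For orientation, a hedged sketch of how a fixture such as DomainTestFixtureGen is typically consumed is shown below; the API-server address, test-case name and 'test-domain' value are placeholders, and the import path simply mirrors the deleted file's location.

# Illustrative usage only; assumes a reachable API server at 127.0.0.1 and
# that the deleted module is importable as vnc_api.gen.resource_test.
import testtools
from vnc_api import vnc_api
from vnc_api.gen.resource_test import DomainTestFixtureGen

class DomainFixtureExample(testtools.TestCase):
    def test_domain_fixture(self):
        conn = vnc_api.VncApi(api_server_host='127.0.0.1')
        # setUp() reads the domain back if it already exists, otherwise
        # creates it; cleanUp() deletes it unless it still has children,
        # as coded in cleanUp() above.
        fixt = self.useFixture(
            DomainTestFixtureGen(conn, 'test-domain', auto_prop_val=True))
        self.assertEqual('test-domain', fixt.getObj().get_fq_name()[-1])
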
-
-class GlobalVrouterConfigTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.GlobalVrouterConfig`
- """
- def __init__(self, conn_drv, global_vrouter_config_name=None, parent_fixt=None, auto_prop_val=False, linklocal_services=None, encapsulation_priorities=None, vxlan_network_identifier_mode=None, id_perms=None, display_name=None):
- '''
- Create GlobalVrouterConfigTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- global_vrouter_config_name (str): Name of global_vrouter_config
- parent_fixt (:class:`.GlobalSystemConfigTestFixtureGen`): Parent fixture
- linklocal_services (instance): instance of :class:`LinklocalServicesTypes`
- encapsulation_priorities (instance): instance of :class:`EncapsulationPrioritiesType`
- vxlan_network_identifier_mode (instance): instance of :class:`xsd:string`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(GlobalVrouterConfigTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not global_vrouter_config_name:
- self._name = 'default-global-vrouter-config'
- else:
- self._name = global_vrouter_config_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.linklocal_services = linklocal_services
- self.encapsulation_priorities = encapsulation_priorities
- self.vxlan_network_identifier_mode = vxlan_network_identifier_mode
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_linklocal_services(self.linklocal_services or vnc_api.gen.resource_xsd.LinklocalServicesTypes.populate())
- self._obj.set_encapsulation_priorities(self.encapsulation_priorities or vnc_api.gen.resource_xsd.EncapsulationPrioritiesType.populate())
- self._obj.set_vxlan_network_identifier_mode(self.vxlan_network_identifier_mode or GeneratedsSuper.populate_string("vxlan_network_identifier_mode"))
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(GlobalVrouterConfigTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(GlobalSystemConfigTestFixtureGen(self._conn_drv, 'default-global-system-config'))
-
- self._obj = vnc_api.GlobalVrouterConfig(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.global_vrouter_config_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.linklocal_services = self.linklocal_services
- self._obj.encapsulation_priorities = self.encapsulation_priorities
- self._obj.vxlan_network_identifier_mode = self.vxlan_network_identifier_mode
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.global_vrouter_config_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.global_vrouter_config_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_global_vrouter_configs():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.global_vrouter_configs.remove(child_obj)
- break
-
- self._conn_drv.global_vrouter_config_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class GlobalVrouterConfigTestFixtureGen
-
-class InstanceIpTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.InstanceIp`
- """
- def __init__(self, conn_drv, instance_ip_name=None, auto_prop_val=False, virtual_network_refs = None, virtual_machine_interface_refs = None, instance_ip_address=None, instance_ip_family=None, instance_ip_mode=None, subnet_uuid=None, id_perms=None, display_name=None):
- '''
- Create InstanceIpTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
-            instance_ip_name (str): Name of instance_ip
- virtual_network (list): list of :class:`VirtualNetwork` type
- virtual_machine_interface (list): list of :class:`VirtualMachineInterface` type
- instance_ip_address (instance): instance of :class:`xsd:string`
- instance_ip_family (instance): instance of :class:`xsd:string`
- instance_ip_mode (instance): instance of :class:`xsd:string`
- subnet_uuid (instance): instance of :class:`xsd:string`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(InstanceIpTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not instance_ip_name:
- self._name = 'default-instance-ip'
- else:
- self._name = instance_ip_name
- self._obj = None
- self._auto_prop_val = auto_prop_val
- if virtual_network_refs:
- for ln in virtual_network_refs:
- self.add_virtual_network (ln)
- if virtual_machine_interface_refs:
- for ln in virtual_machine_interface_refs:
- self.add_virtual_machine_interface (ln)
- self.instance_ip_address = instance_ip_address
- self.instance_ip_family = instance_ip_family
- self.instance_ip_mode = instance_ip_mode
- self.subnet_uuid = subnet_uuid
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_virtual_networks ():
- self.add_virtual_network (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_virtual_machine_interfaces ():
- self.add_virtual_machine_interface (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_virtual_network (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualNetwork` link to :class:`InstanceIp`
- Args:
- lo (:class:`VirtualNetwork`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_network (lo)
- if update_server:
- self._conn_drv.instance_ip_update (self._obj)
-
- if add_link:
- self.add_link('virtual_network', cfixture.ConrtailLink('virtual_network', 'instance_ip', 'virtual_network', ['ref'], lo))
- #end add_virtual_network_link
-
- def get_virtual_networks (self):
- return self.get_links ('virtual_network')
- #end get_virtual_networks
- def add_virtual_machine_interface (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualMachineInterface` link to :class:`InstanceIp`
- Args:
- lo (:class:`VirtualMachineInterface`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_machine_interface (lo)
- if update_server:
- self._conn_drv.instance_ip_update (self._obj)
-
- if add_link:
- self.add_link('virtual_machine_interface', cfixture.ConrtailLink('virtual_machine_interface', 'instance_ip', 'virtual_machine_interface', ['ref'], lo))
- #end add_virtual_machine_interface_link
-
- def get_virtual_machine_interfaces (self):
- return self.get_links ('virtual_machine_interface')
- #end get_virtual_machine_interfaces
-
- def populate (self):
- self._obj.set_instance_ip_address(self.instance_ip_address or GeneratedsSuper.populate_string("instance_ip_address"))
- self._obj.set_instance_ip_family(self.instance_ip_family or GeneratedsSuper.populate_string("instance_ip_family"))
- self._obj.set_instance_ip_mode(self.instance_ip_mode or GeneratedsSuper.populate_string("instance_ip_mode"))
- self._obj.set_subnet_uuid(self.subnet_uuid or GeneratedsSuper.populate_string("subnet_uuid"))
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(InstanceIpTestFixtureGen, self).setUp()
- self._obj = vnc_api.InstanceIp(self._name)
- try:
- self._obj = self._conn_drv.instance_ip_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.instance_ip_address = self.instance_ip_address
- self._obj.instance_ip_family = self.instance_ip_family
- self._obj.instance_ip_mode = self.instance_ip_mode
- self._obj.subnet_uuid = self.subnet_uuid
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.instance_ip_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.instance_ip_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- self._conn_drv.instance_ip_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class InstanceIpTestFixtureGen
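
The ref-list keyword arguments accepted by this fixture are visible in __init__ above; a short, hypothetical wiring of them might look as follows, where conn, vn_obj and vmi_obj stand in for an existing VncApi handle and already-created VirtualNetwork / VirtualMachineInterface objects.

# Hypothetical values throughout; none of these names come from the deleted file.
iip_fixt = InstanceIpTestFixtureGen(conn, 'iip-example',
                                    virtual_network_refs=[vn_obj],
                                    virtual_machine_interface_refs=[vmi_obj],
                                    instance_ip_address='10.0.0.3')
iip_fixt.setUp()               # creates the instance-ip and links both refs
print(iip_fixt.getObj().uuid)  # UUID read back from the server in setUp()
iip_fixt.cleanUp()             # no back-ref guard here, so this deletes it
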
-
-class NetworkPolicyTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.NetworkPolicy`
- """
- def __init__(self, conn_drv, network_policy_name=None, parent_fixt=None, auto_prop_val=False, network_policy_entries=None, id_perms=None, display_name=None):
- '''
- Create NetworkPolicyTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- network_policy_name (str): Name of network_policy
- parent_fixt (:class:`.ProjectTestFixtureGen`): Parent fixture
- network_policy_entries (instance): instance of :class:`PolicyEntriesType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(NetworkPolicyTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not network_policy_name:
- self._name = 'default-network-policy'
- else:
- self._name = network_policy_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.network_policy_entries = network_policy_entries
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_network_policy_entries(self.network_policy_entries or vnc_api.gen.resource_xsd.PolicyEntriesType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(NetworkPolicyTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(ProjectTestFixtureGen(self._conn_drv, 'default-project'))
-
- self._obj = vnc_api.NetworkPolicy(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.network_policy_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.network_policy_entries = self.network_policy_entries
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.network_policy_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.network_policy_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_virtual_network_back_refs():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_network_policys():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.network_policys.remove(child_obj)
- break
-
- self._conn_drv.network_policy_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class NetworkPolicyTestFixtureGen
-
-class LoadbalancerPoolTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.LoadbalancerPool`
- """
- def __init__(self, conn_drv, loadbalancer_pool_name=None, parent_fixt=None, auto_prop_val=False, service_instance_refs = None, virtual_machine_interface_refs = None, service_appliance_set_refs = None, loadbalancer_healthmonitor_refs = None, loadbalancer_pool_properties=None, loadbalancer_pool_provider=None, id_perms=None, display_name=None):
- '''
- Create LoadbalancerPoolTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- loadbalancer_pool_name (str): Name of loadbalancer_pool
- parent_fixt (:class:`.ProjectTestFixtureGen`): Parent fixture
- service_instance (list): list of :class:`ServiceInstance` type
- virtual_machine_interface (list): list of :class:`VirtualMachineInterface` type
- service_appliance_set (list): list of :class:`ServiceApplianceSet` type
- loadbalancer_healthmonitor (list): list of :class:`LoadbalancerHealthmonitor` type
- loadbalancer_pool_properties (instance): instance of :class:`LoadbalancerPoolType`
- loadbalancer_pool_provider (instance): instance of :class:`xsd:string`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(LoadbalancerPoolTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not loadbalancer_pool_name:
- self._name = 'default-loadbalancer-pool'
- else:
- self._name = loadbalancer_pool_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- if service_instance_refs:
- for ln in service_instance_refs:
- self.add_service_instance (ln)
- if virtual_machine_interface_refs:
- for ln in virtual_machine_interface_refs:
- self.add_virtual_machine_interface (ln)
- if service_appliance_set_refs:
- for ln in service_appliance_set_refs:
- self.add_service_appliance_set (ln)
- if loadbalancer_healthmonitor_refs:
- for ln in loadbalancer_healthmonitor_refs:
- self.add_loadbalancer_healthmonitor (ln)
- self.loadbalancer_pool_properties = loadbalancer_pool_properties
- self.loadbalancer_pool_provider = loadbalancer_pool_provider
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_service_instances ():
- self.add_service_instance (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_virtual_machine_interfaces ():
- self.add_virtual_machine_interface (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_service_appliance_sets ():
- self.add_service_appliance_set (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_loadbalancer_healthmonitors ():
- self.add_loadbalancer_healthmonitor (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_service_instance (self, lo, update_server = True, add_link = True):
- '''
- add :class:`ServiceInstance` link to :class:`LoadbalancerPool`
- Args:
- lo (:class:`ServiceInstance`): obj to link
- '''
- if self._obj:
- self._obj.add_service_instance (lo)
- if update_server:
- self._conn_drv.loadbalancer_pool_update (self._obj)
-
- if add_link:
- self.add_link('service_instance', cfixture.ConrtailLink('service_instance', 'loadbalancer_pool', 'service_instance', ['ref'], lo))
- #end add_service_instance_link
-
- def get_service_instances (self):
- return self.get_links ('service_instance')
- #end get_service_instances
- def add_virtual_machine_interface (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualMachineInterface` link to :class:`LoadbalancerPool`
- Args:
- lo (:class:`VirtualMachineInterface`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_machine_interface (lo)
- if update_server:
- self._conn_drv.loadbalancer_pool_update (self._obj)
-
- if add_link:
- self.add_link('virtual_machine_interface', cfixture.ConrtailLink('virtual_machine_interface', 'loadbalancer_pool', 'virtual_machine_interface', ['ref'], lo))
- #end add_virtual_machine_interface_link
-
- def get_virtual_machine_interfaces (self):
- return self.get_links ('virtual_machine_interface')
- #end get_virtual_machine_interfaces
- def add_service_appliance_set (self, lo, update_server = True, add_link = True):
- '''
- add :class:`ServiceApplianceSet` link to :class:`LoadbalancerPool`
- Args:
- lo (:class:`ServiceApplianceSet`): obj to link
- '''
- if self._obj:
- self._obj.add_service_appliance_set (lo)
- if update_server:
- self._conn_drv.loadbalancer_pool_update (self._obj)
-
- if add_link:
- self.add_link('service_appliance_set', cfixture.ConrtailLink('service_appliance_set', 'loadbalancer_pool', 'service_appliance_set', ['ref'], lo))
- #end add_service_appliance_set_link
-
- def get_service_appliance_sets (self):
- return self.get_links ('service_appliance_set')
- #end get_service_appliance_sets
- def add_loadbalancer_healthmonitor (self, lo, update_server = True, add_link = True):
- '''
- add :class:`LoadbalancerHealthmonitor` link to :class:`LoadbalancerPool`
- Args:
- lo (:class:`LoadbalancerHealthmonitor`): obj to link
- '''
- if self._obj:
- self._obj.add_loadbalancer_healthmonitor (lo)
- if update_server:
- self._conn_drv.loadbalancer_pool_update (self._obj)
-
- if add_link:
- self.add_link('loadbalancer_healthmonitor', cfixture.ConrtailLink('loadbalancer_healthmonitor', 'loadbalancer_pool', 'loadbalancer_healthmonitor', ['ref'], lo))
- #end add_loadbalancer_healthmonitor_link
-
- def get_loadbalancer_healthmonitors (self):
- return self.get_links ('loadbalancer_healthmonitor')
- #end get_loadbalancer_healthmonitors
-
- def populate (self):
- self._obj.set_loadbalancer_pool_properties(self.loadbalancer_pool_properties or vnc_api.gen.resource_xsd.LoadbalancerPoolType.populate())
- self._obj.set_loadbalancer_pool_provider(self.loadbalancer_pool_provider or GeneratedsSuper.populate_string("loadbalancer_pool_provider"))
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(LoadbalancerPoolTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(ProjectTestFixtureGen(self._conn_drv, 'default-project'))
-
- self._obj = vnc_api.LoadbalancerPool(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.loadbalancer_pool_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.loadbalancer_pool_properties = self.loadbalancer_pool_properties
- self._obj.loadbalancer_pool_provider = self.loadbalancer_pool_provider
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.loadbalancer_pool_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.loadbalancer_pool_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_virtual_ip_back_refs():
- return
- if self._obj.get_loadbalancer_members():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_loadbalancer_pools():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.loadbalancer_pools.remove(child_obj)
- break
-
- self._conn_drv.loadbalancer_pool_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class LoadbalancerPoolTestFixtureGen
-
-class VirtualDnsRecordTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.VirtualDnsRecord`
- """
- def __init__(self, conn_drv, virtual_DNS_record_name=None, parent_fixt=None, auto_prop_val=False, virtual_DNS_record_data=None, id_perms=None, display_name=None):
- '''
- Create VirtualDnsRecordTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- virtual_DNS_record_name (str): Name of virtual_DNS_record
- parent_fixt (:class:`.VirtualDnsTestFixtureGen`): Parent fixture
- virtual_DNS_record_data (instance): instance of :class:`VirtualDnsRecordType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(VirtualDnsRecordTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not virtual_DNS_record_name:
- self._name = 'default-virtual-DNS-record'
- else:
- self._name = virtual_DNS_record_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.virtual_DNS_record_data = virtual_DNS_record_data
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_virtual_DNS_record_data(self.virtual_DNS_record_data or vnc_api.gen.resource_xsd.VirtualDnsRecordType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(VirtualDnsRecordTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(VirtualDnsTestFixtureGen(self._conn_drv, 'default-virtual-DNS'))
-
- self._obj = vnc_api.VirtualDnsRecord(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.virtual_DNS_record_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.virtual_DNS_record_data = self.virtual_DNS_record_data
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.virtual_DNS_record_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.virtual_DNS_record_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_virtual_DNS_records():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.virtual_DNS_records.remove(child_obj)
- break
-
- self._conn_drv.virtual_DNS_record_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class VirtualDnsRecordTestFixtureGen
-
-class RouteTargetTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.RouteTarget`
- """
- def __init__(self, conn_drv, route_target_name=None, auto_prop_val=False, id_perms=None, display_name=None):
- '''
- Create RouteTargetTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
-            route_target_name (str): Name of route_target
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(RouteTargetTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not route_target_name:
- self._name = 'default-route-target'
- else:
- self._name = route_target_name
- self._obj = None
- self._auto_prop_val = auto_prop_val
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(RouteTargetTestFixtureGen, self).setUp()
- self._obj = vnc_api.RouteTarget(self._name)
- try:
- self._obj = self._conn_drv.route_target_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.route_target_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.route_target_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_logical_router_back_refs() or self._obj.get_routing_instance_back_refs():
- return
- self._conn_drv.route_target_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class RouteTargetTestFixtureGen
-
-class FloatingIpTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.FloatingIp`
- """
- def __init__(self, conn_drv, floating_ip_name=None, parent_fixt=None, auto_prop_val=False, project_refs = None, virtual_machine_interface_refs = None, floating_ip_address=None, floating_ip_is_virtual_ip=None, floating_ip_fixed_ip_address=None, floating_ip_address_family=None, id_perms=None, display_name=None):
- '''
- Create FloatingIpTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- floating_ip_name (str): Name of floating_ip
- parent_fixt (:class:`.FloatingIpPoolTestFixtureGen`): Parent fixture
- project (list): list of :class:`Project` type
- virtual_machine_interface (list): list of :class:`VirtualMachineInterface` type
- floating_ip_address (instance): instance of :class:`xsd:string`
- floating_ip_is_virtual_ip (instance): instance of :class:`xsd:boolean`
- floating_ip_fixed_ip_address (instance): instance of :class:`xsd:string`
- floating_ip_address_family (instance): instance of :class:`xsd:string`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(FloatingIpTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not floating_ip_name:
- self._name = 'default-floating-ip'
- else:
- self._name = floating_ip_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- if project_refs:
- for ln in project_refs:
- self.add_project (ln)
- if virtual_machine_interface_refs:
- for ln in virtual_machine_interface_refs:
- self.add_virtual_machine_interface (ln)
- self.floating_ip_address = floating_ip_address
- self.floating_ip_is_virtual_ip = floating_ip_is_virtual_ip
- self.floating_ip_fixed_ip_address = floating_ip_fixed_ip_address
- self.floating_ip_address_family = floating_ip_address_family
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_projects ():
- self.add_project (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_virtual_machine_interfaces ():
- self.add_virtual_machine_interface (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_project (self, lo, update_server = True, add_link = True):
- '''
- add :class:`Project` link to :class:`FloatingIp`
- Args:
- lo (:class:`Project`): obj to link
- '''
- if self._obj:
- self._obj.add_project (lo)
- if update_server:
- self._conn_drv.floating_ip_update (self._obj)
-
- if add_link:
- self.add_link('project', cfixture.ConrtailLink('project', 'floating_ip', 'project', ['ref'], lo))
- #end add_project_link
-
- def get_projects (self):
- return self.get_links ('project')
- #end get_projects
- def add_virtual_machine_interface (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualMachineInterface` link to :class:`FloatingIp`
- Args:
- lo (:class:`VirtualMachineInterface`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_machine_interface (lo)
- if update_server:
- self._conn_drv.floating_ip_update (self._obj)
-
- if add_link:
- self.add_link('virtual_machine_interface', cfixture.ConrtailLink('virtual_machine_interface', 'floating_ip', 'virtual_machine_interface', ['ref'], lo))
- #end add_virtual_machine_interface_link
-
- def get_virtual_machine_interfaces (self):
- return self.get_links ('virtual_machine_interface')
- #end get_virtual_machine_interfaces
-
- def populate (self):
- self._obj.set_floating_ip_address(self.floating_ip_address or GeneratedsSuper.populate_string("floating_ip_address"))
- self._obj.set_floating_ip_is_virtual_ip(self.floating_ip_is_virtual_ip or GeneratedsSuper.populate_boolean("floating_ip_is_virtual_ip"))
- self._obj.set_floating_ip_fixed_ip_address(self.floating_ip_fixed_ip_address or GeneratedsSuper.populate_string("floating_ip_fixed_ip_address"))
- self._obj.set_floating_ip_address_family(self.floating_ip_address_family or GeneratedsSuper.populate_string("floating_ip_address_family"))
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(FloatingIpTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(FloatingIpPoolTestFixtureGen(self._conn_drv, 'default-floating-ip-pool'))
-
- self._obj = vnc_api.FloatingIp(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.floating_ip_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.floating_ip_address = self.floating_ip_address
- self._obj.floating_ip_is_virtual_ip = self.floating_ip_is_virtual_ip
- self._obj.floating_ip_fixed_ip_address = self.floating_ip_fixed_ip_address
- self._obj.floating_ip_address_family = self.floating_ip_address_family
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.floating_ip_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.floating_ip_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_customer_attachment_back_refs():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_floating_ips():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.floating_ips.remove(child_obj)
- break
-
- self._conn_drv.floating_ip_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class FloatingIpTestFixtureGen
-
-class FloatingIpPoolTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.FloatingIpPool`
- """
- def __init__(self, conn_drv, floating_ip_pool_name=None, parent_fixt=None, auto_prop_val=False, floating_ip_pool_prefixes=None, id_perms=None, display_name=None):
- '''
- Create FloatingIpPoolTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- floating_ip_pool_name (str): Name of floating_ip_pool
- parent_fixt (:class:`.VirtualNetworkTestFixtureGen`): Parent fixture
- floating_ip_pool_prefixes (instance): instance of :class:`FloatingIpPoolType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(FloatingIpPoolTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not floating_ip_pool_name:
- self._name = 'default-floating-ip-pool'
- else:
- self._name = floating_ip_pool_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.floating_ip_pool_prefixes = floating_ip_pool_prefixes
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_floating_ip_pool_prefixes(self.floating_ip_pool_prefixes or vnc_api.gen.resource_xsd.FloatingIpPoolType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(FloatingIpPoolTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(VirtualNetworkTestFixtureGen(self._conn_drv, 'default-virtual-network'))
-
- self._obj = vnc_api.FloatingIpPool(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.floating_ip_pool_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.floating_ip_pool_prefixes = self.floating_ip_pool_prefixes
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.floating_ip_pool_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.floating_ip_pool_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_project_back_refs():
- return
- if self._obj.get_floating_ips():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_floating_ip_pools():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.floating_ip_pools.remove(child_obj)
- break
-
- self._conn_drv.floating_ip_pool_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class FloatingIpPoolTestFixtureGen
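
A recurring pattern in these fixtures shows in setUp() above: when parent_fixt is not supplied, the default parent is created on the fly with useFixture. A hedged sketch of relying on that chaining versus passing an explicit parent is shown here; conn and vn_fixt are assumed to exist already.

# Sketch; relies on the default-parent behaviour coded in setUp() above.
pool_default = FloatingIpPoolTestFixtureGen(conn)   # setUp() will attach it to
                                                    # 'default-virtual-network'
pool_scoped = FloatingIpPoolTestFixtureGen(conn, 'fip-pool-1',
                                           parent_fixt=vn_fixt)  # explicit parent
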
-
-class PhysicalRouterTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.PhysicalRouter`
- """
- def __init__(self, conn_drv, physical_router_name=None, parent_fixt=None, auto_prop_val=False, virtual_router_refs = None, bgp_router_refs = None, virtual_network_refs = None, physical_router_management_ip=None, physical_router_dataplane_ip=None, physical_router_vendor_name=None, physical_router_product_name=None, physical_router_vnc_managed=None, physical_router_user_credentials=None, physical_router_snmp_credentials=None, physical_router_junos_service_ports=None, id_perms=None, display_name=None):
- '''
- Create PhysicalRouterTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- physical_router_name (str): Name of physical_router
- parent_fixt (:class:`.GlobalSystemConfigTestFixtureGen`): Parent fixture
- virtual_router (list): list of :class:`VirtualRouter` type
- bgp_router (list): list of :class:`BgpRouter` type
- virtual_network (list): list of :class:`VirtualNetwork` type
- physical_router_management_ip (instance): instance of :class:`xsd:string`
- physical_router_dataplane_ip (instance): instance of :class:`xsd:string`
- physical_router_vendor_name (instance): instance of :class:`xsd:string`
- physical_router_product_name (instance): instance of :class:`xsd:string`
- physical_router_vnc_managed (instance): instance of :class:`xsd:boolean`
- physical_router_user_credentials (instance): instance of :class:`UserCredentials`
- physical_router_snmp_credentials (instance): instance of :class:`SNMPCredentials`
- physical_router_junos_service_ports (instance): instance of :class:`JunosServicePorts`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(PhysicalRouterTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not physical_router_name:
- self._name = 'default-physical-router'
- else:
- self._name = physical_router_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- if virtual_router_refs:
- for ln in virtual_router_refs:
- self.add_virtual_router (ln)
- if bgp_router_refs:
- for ln in bgp_router_refs:
- self.add_bgp_router (ln)
- if virtual_network_refs:
- for ln in virtual_network_refs:
- self.add_virtual_network (ln)
- self.physical_router_management_ip = physical_router_management_ip
- self.physical_router_dataplane_ip = physical_router_dataplane_ip
- self.physical_router_vendor_name = physical_router_vendor_name
- self.physical_router_product_name = physical_router_product_name
- self.physical_router_vnc_managed = physical_router_vnc_managed
- self.physical_router_user_credentials = physical_router_user_credentials
- self.physical_router_snmp_credentials = physical_router_snmp_credentials
- self.physical_router_junos_service_ports = physical_router_junos_service_ports
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_virtual_routers ():
- self.add_virtual_router (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_bgp_routers ():
- self.add_bgp_router (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_virtual_networks ():
- self.add_virtual_network (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_virtual_router (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualRouter` link to :class:`PhysicalRouter`
- Args:
- lo (:class:`VirtualRouter`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_router (lo)
- if update_server:
- self._conn_drv.physical_router_update (self._obj)
-
- if add_link:
- self.add_link('virtual_router', cfixture.ConrtailLink('virtual_router', 'physical_router', 'virtual_router', ['ref'], lo))
- #end add_virtual_router_link
-
- def get_virtual_routers (self):
- return self.get_links ('virtual_router')
- #end get_virtual_routers
- def add_bgp_router (self, lo, update_server = True, add_link = True):
- '''
- add :class:`BgpRouter` link to :class:`PhysicalRouter`
- Args:
- lo (:class:`BgpRouter`): obj to link
- '''
- if self._obj:
- self._obj.add_bgp_router (lo)
- if update_server:
- self._conn_drv.physical_router_update (self._obj)
-
- if add_link:
- self.add_link('bgp_router', cfixture.ConrtailLink('bgp_router', 'physical_router', 'bgp_router', ['ref'], lo))
- #end add_bgp_router_link
-
- def get_bgp_routers (self):
- return self.get_links ('bgp_router')
- #end get_bgp_routers
- def add_virtual_network (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualNetwork` link to :class:`PhysicalRouter`
- Args:
- lo (:class:`VirtualNetwork`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_network (lo)
- if update_server:
- self._conn_drv.physical_router_update (self._obj)
-
- if add_link:
- self.add_link('virtual_network', cfixture.ConrtailLink('virtual_network', 'physical_router', 'virtual_network', ['ref'], lo))
- #end add_virtual_network_link
-
- def get_virtual_networks (self):
- return self.get_links ('virtual_network')
- #end get_virtual_networks
-
- def populate (self):
- self._obj.set_physical_router_management_ip(self.physical_router_management_ip or GeneratedsSuper.populate_string("physical_router_management_ip"))
- self._obj.set_physical_router_dataplane_ip(self.physical_router_dataplane_ip or GeneratedsSuper.populate_string("physical_router_dataplane_ip"))
- self._obj.set_physical_router_vendor_name(self.physical_router_vendor_name or GeneratedsSuper.populate_string("physical_router_vendor_name"))
- self._obj.set_physical_router_product_name(self.physical_router_product_name or GeneratedsSuper.populate_string("physical_router_product_name"))
- self._obj.set_physical_router_vnc_managed(self.physical_router_vnc_managed or GeneratedsSuper.populate_boolean("physical_router_vnc_managed"))
- self._obj.set_physical_router_user_credentials(self.physical_router_user_credentials or [vnc_api.gen.resource_xsd.UserCredentials.populate()])
- self._obj.set_physical_router_snmp_credentials(self.physical_router_snmp_credentials or vnc_api.gen.resource_xsd.SNMPCredentials.populate())
- self._obj.set_physical_router_junos_service_ports(self.physical_router_junos_service_ports or vnc_api.gen.resource_xsd.JunosServicePorts.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(PhysicalRouterTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(GlobalSystemConfigTestFixtureGen(self._conn_drv, 'default-global-system-config'))
-
- self._obj = vnc_api.PhysicalRouter(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.physical_router_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.physical_router_management_ip = self.physical_router_management_ip
- self._obj.physical_router_dataplane_ip = self.physical_router_dataplane_ip
- self._obj.physical_router_vendor_name = self.physical_router_vendor_name
- self._obj.physical_router_product_name = self.physical_router_product_name
- self._obj.physical_router_vnc_managed = self.physical_router_vnc_managed
- self._obj.physical_router_user_credentials = self.physical_router_user_credentials
- self._obj.physical_router_snmp_credentials = self.physical_router_snmp_credentials
- self._obj.physical_router_junos_service_ports = self.physical_router_junos_service_ports
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.physical_router_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.physical_router_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_physical_interfaces() or self._obj.get_logical_interfaces():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_physical_routers():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.physical_routers.remove(child_obj)
- break
-
- self._conn_drv.physical_router_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class PhysicalRouterTestFixtureGen
-
-class BgpRouterTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.BgpRouter`
- """
- def __init__(self, conn_drv, bgp_router_name=None, parent_fixt=None, auto_prop_val=False, bgp_router_ref_infos = None, bgp_router_parameters=None, id_perms=None, display_name=None):
- '''
- Create BgpRouterTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- bgp_router_name (str): Name of bgp_router
- parent_fixt (:class:`.RoutingInstanceTestFixtureGen`): Parent fixture
- bgp_router (list): list of tuple (:class:`BgpRouter`, :class: `BgpPeeringAttributes`) type
- bgp_router_parameters (instance): instance of :class:`BgpRouterParams`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(BgpRouterTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not bgp_router_name:
- self._name = 'default-bgp-router'
- else:
- self._name = bgp_router_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- if bgp_router_ref_infos:
- for ln, ref in bgp_router_ref_infos:
- self.add_bgp_router (ln, ref)
- self.bgp_router_parameters = bgp_router_parameters
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_bgp_routers ():
- self.add_bgp_router (*ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_bgp_router (self, lo, ref, update_server = True, add_link = True):
- '''
- add :class:`BgpRouter` link to :class:`BgpRouter`
- Args:
- lo (:class:`BgpRouter`): obj to link
- ref (:class:`BgpPeeringAttributes`): property of the link object
- '''
- if self._obj:
- self._obj.add_bgp_router (lo, ref)
- if update_server:
- self._conn_drv.bgp_router_update (self._obj)
-
- if add_link:
- self.add_link('bgp_router', cfixture.ConrtailLink('bgp_router', 'bgp_router', 'bgp_router', ['ref'], (lo, ref)))
- #end add_bgp_router_link
-
- def get_bgp_routers (self):
- return self.get_links ('bgp_router')
- #end get_bgp_routers
-
- def populate (self):
- self._obj.set_bgp_router_parameters(self.bgp_router_parameters or vnc_api.gen.resource_xsd.BgpRouterParams.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(BgpRouterTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(RoutingInstanceTestFixtureGen(self._conn_drv, 'default-routing-instance'))
-
- self._obj = vnc_api.BgpRouter(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.bgp_router_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.bgp_router_parameters = self.bgp_router_parameters
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.bgp_router_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.bgp_router_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_global_system_config_back_refs() or self._obj.get_physical_router_back_refs() or self._obj.get_virtual_router_back_refs() or self._obj.get_bgp_router_back_refs():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_bgp_routers():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.bgp_routers.remove(child_obj)
- break
-
- self._conn_drv.bgp_router_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class BgpRouterTestFixtureGen
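A minimal usage sketch for the fixture above, assuming a testtools-style test case, a reachable VNC API server behind vnc_api.vnc_api.VncApi(), and that these generated fixture classes are importable; the test class and router names are illustrative:

    import testtools
    import vnc_api.vnc_api

    class BgpPeeringSketch(testtools.TestCase):
        def test_peer_two_bgp_routers(self):
            conn = vnc_api.vnc_api.VncApi()  # assumed: default credentials against a local API server
            # With no parent_fixt, setUp() creates the default routing-instance parent itself.
            left = self.useFixture(BgpRouterTestFixtureGen(conn, 'bgp-left', auto_prop_val=True))
            right = self.useFixture(BgpRouterTestFixtureGen(conn, 'bgp-right', auto_prop_val=True))
            # Second argument is the BgpPeeringAttributes link property; None keeps the sketch minimal.
            left.add_bgp_router(right.getObj(), None)
            self.assertIsNotNone(left.getObj().uuid)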
-
-class VirtualRouterTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.VirtualRouter`
- """
- def __init__(self, conn_drv, virtual_router_name=None, parent_fixt=None, auto_prop_val=False, bgp_router_refs = None, virtual_machine_refs = None, virtual_router_type=None, virtual_router_ip_address=None, id_perms=None, display_name=None):
- '''
- Create VirtualRouterTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- virtual_router_name (str): Name of virtual_router
- parent_fixt (:class:`.GlobalSystemConfigTestFixtureGen`): Parent fixture
- bgp_router_refs (list): list of :class:`BgpRouter` objects
- virtual_machine_refs (list): list of :class:`VirtualMachine` objects
- virtual_router_type (instance): instance of :class:`xsd:string`
- virtual_router_ip_address (instance): instance of :class:`xsd:string`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(VirtualRouterTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not virtual_router_name:
- self._name = 'default-virtual-router'
- else:
- self._name = virtual_router_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- if bgp_router_refs:
- for ln in bgp_router_refs:
- self.add_bgp_router (ln)
- if virtual_machine_refs:
- for ln in virtual_machine_refs:
- self.add_virtual_machine (ln)
- self.virtual_router_type = virtual_router_type
- self.virtual_router_ip_address = virtual_router_ip_address
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_bgp_routers ():
- self.add_bgp_router (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_virtual_machines ():
- self.add_virtual_machine (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_bgp_router (self, lo, update_server = True, add_link = True):
- '''
- add :class:`BgpRouter` link to :class:`VirtualRouter`
- Args:
- lo (:class:`BgpRouter`): obj to link
- '''
- if self._obj:
- self._obj.add_bgp_router (lo)
- if update_server:
- self._conn_drv.virtual_router_update (self._obj)
-
- if add_link:
- self.add_link('bgp_router', cfixture.ConrtailLink('bgp_router', 'virtual_router', 'bgp_router', ['ref'], lo))
- #end add_bgp_router_link
-
- def get_bgp_routers (self):
- return self.get_links ('bgp_router')
- #end get_bgp_routers
- def add_virtual_machine (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualMachine` link to :class:`VirtualRouter`
- Args:
- lo (:class:`VirtualMachine`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_machine (lo)
- if update_server:
- self._conn_drv.virtual_router_update (self._obj)
-
- if add_link:
- self.add_link('virtual_machine', cfixture.ConrtailLink('virtual_machine', 'virtual_router', 'virtual_machine', ['ref'], lo))
- #end add_virtual_machine_link
-
- def get_virtual_machines (self):
- return self.get_links ('virtual_machine')
- #end get_virtual_machines
-
- def populate (self):
- self._obj.set_virtual_router_type(self.virtual_router_type or GeneratedsSuper.populate_string("virtual_router_type"))
- self._obj.set_virtual_router_ip_address(self.virtual_router_ip_address or GeneratedsSuper.populate_string("virtual_router_ip_address"))
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(VirtualRouterTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(GlobalSystemConfigTestFixtureGen(self._conn_drv, 'default-global-system-config'))
-
- self._obj = vnc_api.VirtualRouter(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.virtual_router_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.virtual_router_type = self.virtual_router_type
- self._obj.virtual_router_ip_address = self.virtual_router_ip_address
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.virtual_router_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.virtual_router_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_physical_router_back_refs() or self._obj.get_provider_attachment_back_refs():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_virtual_routers():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.virtual_routers.remove(child_obj)
- break
-
- self._conn_drv.virtual_router_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class VirtualRouterTestFixtureGen
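A sketch of attaching a virtual router to an explicitly shared parent and a BGP router reference, under the same assumptions as the previous sketch (reachable API server, importable fixtures); the names and IP address are illustrative:

    import testtools
    import vnc_api.vnc_api

    class VirtualRouterSketch(testtools.TestCase):
        def test_vrouter_under_explicit_parent(self):
            conn = vnc_api.vnc_api.VncApi()  # assumed default credentials
            gsc = self.useFixture(GlobalSystemConfigTestFixtureGen(conn, 'default-global-system-config'))
            bgp = self.useFixture(BgpRouterTestFixtureGen(conn, 'bgp-1', auto_prop_val=True))
            vrouter = self.useFixture(VirtualRouterTestFixtureGen(
                conn, 'compute-1',
                parent_fixt=gsc,                 # reuse the shared parent instead of letting setUp() create one
                bgp_router_refs=[bgp.getObj()],  # refs are plain BgpRouter objects (no link property)
                virtual_router_ip_address='10.0.0.1'))
            self.assertIn('compute-1', vrouter.getObj().get_fq_name())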
-
-class SubnetTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.Subnet`
- """
- def __init__(self, conn_drv, subnet_name=None, auto_prop_val=False, virtual_machine_interface_refs = None, subnet_ip_prefix=None, id_perms=None, display_name=None):
- '''
- Create SubnetTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- subnet_name (str): Name of subnet
- virtual_machine_interface_refs (list): list of :class:`VirtualMachineInterface` objects
- subnet_ip_prefix (instance): instance of :class:`SubnetType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(SubnetTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not subnet_name:
- self._name = 'default-subnet'
- else:
- self._name = subnet_name
- self._obj = None
- self._auto_prop_val = auto_prop_val
- if virtual_machine_interface_refs:
- for ln in virtual_machine_interface_refs:
- self.add_virtual_machine_interface (ln)
- self.subnet_ip_prefix = subnet_ip_prefix
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_virtual_machine_interfaces ():
- self.add_virtual_machine_interface (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_virtual_machine_interface (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualMachineInterface` link to :class:`Subnet`
- Args:
- lo (:class:`VirtualMachineInterface`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_machine_interface (lo)
- if update_server:
- self._conn_drv.subnet_update (self._obj)
-
- if add_link:
- self.add_link('virtual_machine_interface', cfixture.ConrtailLink('virtual_machine_interface', 'subnet', 'virtual_machine_interface', ['ref'], lo))
- #end add_virtual_machine_interface_link
-
- def get_virtual_machine_interfaces (self):
- return self.get_links ('virtual_machine_interface')
- #end get_virtual_machine_interfaces
-
- def populate (self):
- self._obj.set_subnet_ip_prefix(self.subnet_ip_prefix or vnc_api.gen.resource_xsd.SubnetType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(SubnetTestFixtureGen, self).setUp()
- self._obj = vnc_api.Subnet(self._name)
- try:
- self._obj = self._conn_drv.subnet_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.subnet_ip_prefix = self.subnet_ip_prefix
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.subnet_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.subnet_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- self._conn_drv.subnet_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class SubnetTestFixtureGen
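Subnet takes no parent fixture, so a sketch only needs the connection and, typically, an explicit prefix; the SubnetType field values below are illustrative and assume the usual vnc_api ip_prefix/ip_prefix_len attributes:

    import testtools
    import vnc_api.vnc_api
    import vnc_api.gen.resource_xsd

    class SubnetSketch(testtools.TestCase):
        def test_subnet_roundtrip(self):
            conn = vnc_api.vnc_api.VncApi()  # assumed default credentials
            prefix = vnc_api.gen.resource_xsd.SubnetType(ip_prefix='192.168.1.0', ip_prefix_len=24)
            subnet = self.useFixture(SubnetTestFixtureGen(conn, 'subnet-1', subnet_ip_prefix=prefix))
            # setUp() reads the object back, so server-allocated fields such as uuid are populated.
            self.assertIsNotNone(subnet.getObj().uuid)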
-
-class GlobalSystemConfigTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.GlobalSystemConfig`
- """
- def __init__(self, conn_drv, global_system_config_name=None, parent_fixt=None, auto_prop_val=False, bgp_router_refs = None, autonomous_system=None, config_version=None, plugin_tuning=None, ibgp_auto_mesh=None, ip_fabric_subnets=None, id_perms=None, display_name=None):
- '''
- Create GlobalSystemConfigTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- global_system_config_name (str): Name of global_system_config
- parent_fixt (:class:`.ConfigRootTestFixtureGen`): Parent fixture
- bgp_router_refs (list): list of :class:`BgpRouter` objects
- autonomous_system (instance): instance of :class:`xsd:integer`
- config_version (instance): instance of :class:`xsd:string`
- plugin_tuning (instance): instance of :class:`PluginProperties`
- ibgp_auto_mesh (instance): instance of :class:`xsd:boolean`
- ip_fabric_subnets (instance): instance of :class:`SubnetListType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(GlobalSystemConfigTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not global_system_config_name:
- self._name = 'default-global-system-config'
- else:
- self._name = global_system_config_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- if bgp_router_refs:
- for ln in bgp_router_refs:
- self.add_bgp_router (ln)
- self.autonomous_system = autonomous_system
- self.config_version = config_version
- self.plugin_tuning = plugin_tuning
- self.ibgp_auto_mesh = ibgp_auto_mesh
- self.ip_fabric_subnets = ip_fabric_subnets
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_bgp_routers ():
- self.add_bgp_router (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_bgp_router (self, lo, update_server = True, add_link = True):
- '''
- add :class:`BgpRouter` link to :class:`GlobalSystemConfig`
- Args:
- lo (:class:`BgpRouter`): obj to link
- '''
- if self._obj:
- self._obj.add_bgp_router (lo)
- if update_server:
- self._conn_drv.global_system_config_update (self._obj)
-
- if add_link:
- self.add_link('bgp_router', cfixture.ConrtailLink('bgp_router', 'global_system_config', 'bgp_router', ['ref'], lo))
- #end add_bgp_router_link
-
- def get_bgp_routers (self):
- return self.get_links ('bgp_router')
- #end get_bgp_routers
-
- def populate (self):
- self._obj.set_autonomous_system(self.autonomous_system or GeneratedsSuper.populate_integer("autonomous_system"))
- self._obj.set_config_version(self.config_version or GeneratedsSuper.populate_string("config_version"))
- self._obj.set_plugin_tuning(self.plugin_tuning or vnc_api.gen.resource_xsd.PluginProperties.populate())
- self._obj.set_ibgp_auto_mesh(self.ibgp_auto_mesh or GeneratedsSuper.populate_boolean("ibgp_auto_mesh"))
- self._obj.set_ip_fabric_subnets(self.ip_fabric_subnets or vnc_api.gen.resource_xsd.SubnetListType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(GlobalSystemConfigTestFixtureGen, self).setUp()
- # child of config-root
- self._obj = vnc_api.GlobalSystemConfig(self._name)
- try:
- self._obj = self._conn_drv.global_system_config_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.autonomous_system = self.autonomous_system
- self._obj.config_version = self.config_version
- self._obj.plugin_tuning = self.plugin_tuning
- self._obj.ibgp_auto_mesh = self.ibgp_auto_mesh
- self._obj.ip_fabric_subnets = self.ip_fabric_subnets
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.global_system_config_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.global_system_config_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_global_vrouter_configs() or self._obj.get_physical_routers() or self._obj.get_virtual_routers() or self._obj.get_config_nodes() or self._obj.get_analytics_nodes() or self._obj.get_database_nodes() or self._obj.get_service_appliance_sets():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_global_system_configs():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.global_system_configs.remove(child_obj)
- break
-
- self._conn_drv.global_system_config_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class GlobalSystemConfigTestFixtureGen
-
-class ServiceApplianceTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.ServiceAppliance`
- """
- def __init__(self, conn_drv, service_appliance_name=None, parent_fixt=None, auto_prop_val=False, service_appliance_user_credentials=None, service_appliance_ip_address=None, service_appliance_properties=None, id_perms=None, display_name=None):
- '''
- Create ServiceApplianceTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- service_appliance_name (str): Name of service_appliance
- parent_fixt (:class:`.ServiceApplianceSetTestFixtureGen`): Parent fixture
- service_appliance_user_credentials (instance): instance of :class:`UserCredentials`
- service_appliance_ip_address (instance): instance of :class:`xsd:string`
- service_appliance_properties (instance): instance of :class:`KeyValuePairs`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(ServiceApplianceTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not service_appliance_name:
- self._name = 'default-service-appliance'
- else:
- self._name = service_appliance_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.service_appliance_user_credentials = service_appliance_user_credentials
- self.service_appliance_ip_address = service_appliance_ip_address
- self.service_appliance_properties = service_appliance_properties
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_service_appliance_user_credentials(self.service_appliance_user_credentials or [vnc_api.gen.resource_xsd.UserCredentials.populate()])
- self._obj.set_service_appliance_ip_address(self.service_appliance_ip_address or GeneratedsSuper.populate_string("service_appliance_ip_address"))
- self._obj.set_service_appliance_properties(self.service_appliance_properties or vnc_api.gen.resource_xsd.KeyValuePairs.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(ServiceApplianceTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(ServiceApplianceSetTestFixtureGen(self._conn_drv, 'default-service-appliance-set'))
-
- self._obj = vnc_api.ServiceAppliance(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.service_appliance_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.service_appliance_user_credentials = self.service_appliance_user_credentials
- self._obj.service_appliance_ip_address = self.service_appliance_ip_address
- self._obj.service_appliance_properties = self.service_appliance_properties
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.service_appliance_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.service_appliance_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_service_appliances():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.service_appliances.remove(child_obj)
- break
-
- self._conn_drv.service_appliance_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class ServiceApplianceTestFixtureGen
-
-class ServiceInstanceTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.ServiceInstance`
- """
- def __init__(self, conn_drv, service_instance_name=None, parent_fixt=None, auto_prop_val=False, service_template_refs = None, service_instance_properties=None, id_perms=None, display_name=None):
- '''
- Create ServiceInstanceTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- service_instance_name (str): Name of service_instance
- parent_fixt (:class:`.ProjectTestFixtureGen`): Parent fixture
- service_template_refs (list): list of :class:`ServiceTemplate` objects
- service_instance_properties (instance): instance of :class:`ServiceInstanceType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(ServiceInstanceTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not service_instance_name:
- self._name = 'default-service-instance'
- else:
- self._name = service_instance_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- if service_template_refs:
- for ln in service_template_refs:
- self.add_service_template (ln)
- self.service_instance_properties = service_instance_properties
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_service_templates ():
- self.add_service_template (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_service_template (self, lo, update_server = True, add_link = True):
- '''
- add :class:`ServiceTemplate` link to :class:`ServiceInstance`
- Args:
- lo (:class:`ServiceTemplate`): obj to link
- '''
- if self._obj:
- self._obj.add_service_template (lo)
- if update_server:
- self._conn_drv.service_instance_update (self._obj)
-
- if add_link:
- self.add_link('service_template', cfixture.ConrtailLink('service_template', 'service_instance', 'service_template', ['ref'], lo))
- #end add_service_template_link
-
- def get_service_templates (self):
- return self.get_links ('service_template')
- #end get_service_templates
-
- def populate (self):
- self._obj.set_service_instance_properties(self.service_instance_properties or vnc_api.gen.resource_xsd.ServiceInstanceType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(ServiceInstanceTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(ProjectTestFixtureGen(self._conn_drv, 'default-project'))
-
- self._obj = vnc_api.ServiceInstance(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.service_instance_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.service_instance_properties = self.service_instance_properties
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.service_instance_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.service_instance_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_logical_router_back_refs() or self._obj.get_loadbalancer_pool_back_refs():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_service_instances():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.service_instances.remove(child_obj)
- break
-
- self._conn_drv.service_instance_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class ServiceInstanceTestFixtureGen
-
-class NamespaceTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.Namespace`
- """
- def __init__(self, conn_drv, namespace_name=None, parent_fixt=None, auto_prop_val=False, namespace_cidr=None, id_perms=None, display_name=None):
- '''
- Create NamespaceTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- namespace_name (str): Name of namespace
- parent_fixt (:class:`.DomainTestFixtureGen`): Parent fixture
- namespace_cidr (instance): instance of :class:`SubnetType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(NamespaceTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not namespace_name:
- self._name = 'default-namespace'
- else:
- self._name = namespace_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.namespace_cidr = namespace_cidr
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_namespace_cidr(self.namespace_cidr or vnc_api.gen.resource_xsd.SubnetType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(NamespaceTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(DomainTestFixtureGen(self._conn_drv, 'default-domain'))
-
- self._obj = vnc_api.Namespace(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.namespace_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.namespace_cidr = self.namespace_cidr
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.namespace_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.namespace_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_project_back_refs():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_namespaces():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.namespaces.remove(child_obj)
- break
-
- self._conn_drv.namespace_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class NamespaceTestFixtureGen
-
-class LogicalInterfaceTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.LogicalInterface`
- """
- def __init__(self, conn_drv, logical_interface_name=None, parent_fixt=None, auto_prop_val=False, virtual_machine_interface_refs = None, logical_interface_vlan_tag=None, logical_interface_type=None, id_perms=None, display_name=None):
- '''
- Create LogicalInterfaceTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- logical_interface_name (str): Name of logical_interface
- parent_fixt (:class:`.PhysicalRouterTestFixtureGen`): Parent fixture
- virtual_machine_interface_refs (list): list of :class:`VirtualMachineInterface` objects
- logical_interface_vlan_tag (instance): instance of :class:`xsd:integer`
- logical_interface_type (instance): instance of :class:`xsd:string`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(LogicalInterfaceTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not logical_interface_name:
- self._name = 'default-logical-interface'
- else:
- self._name = logical_interface_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- if virtual_machine_interface_refs:
- for ln in virtual_machine_interface_refs:
- self.add_virtual_machine_interface (ln)
- self.logical_interface_vlan_tag = logical_interface_vlan_tag
- self.logical_interface_type = logical_interface_type
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_virtual_machine_interfaces ():
- self.add_virtual_machine_interface (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_virtual_machine_interface (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualMachineInterface` link to :class:`LogicalInterface`
- Args:
- lo (:class:`VirtualMachineInterface`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_machine_interface (lo)
- if update_server:
- self._conn_drv.logical_interface_update (self._obj)
-
- if add_link:
- self.add_link('virtual_machine_interface', cfixture.ConrtailLink('virtual_machine_interface', 'logical_interface', 'virtual_machine_interface', ['ref'], lo))
- #end add_virtual_machine_interface_link
-
- def get_virtual_machine_interfaces (self):
- return self.get_links ('virtual_machine_interface')
- #end get_virtual_machine_interfaces
-
- def populate (self):
- self._obj.set_logical_interface_vlan_tag(self.logical_interface_vlan_tag or GeneratedsSuper.populate_integer("logical_interface_vlan_tag"))
- self._obj.set_logical_interface_type(self.logical_interface_type or GeneratedsSuper.populate_string("logical_interface_type"))
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(LogicalInterfaceTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- raise AmbiguousParentError("[[u'default-global-system-config', u'default-physical-router'], [u'default-global-system-config', u'default-physical-router', u'default-physical-interface']]")
-
- self._obj = vnc_api.LogicalInterface(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.logical_interface_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.logical_interface_vlan_tag = self.logical_interface_vlan_tag
- self._obj.logical_interface_type = self.logical_interface_type
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.logical_interface_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.logical_interface_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_logical_interfaces():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.logical_interfaces.remove(child_obj)
- break
-
- self._conn_drv.logical_interface_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class LogicalInterfaceTestFixtureGen
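Because setUp() above raises AmbiguousParentError when no parent is supplied (a logical interface may live under either a physical router or a physical interface), a sketch has to pass parent_fixt explicitly; the interface names, VLAN tag, and type are illustrative:

    import testtools
    import vnc_api.vnc_api

    class LogicalInterfaceSketch(testtools.TestCase):
        def test_logical_interface_under_physical_interface(self):
            conn = vnc_api.vnc_api.VncApi()  # assumed default credentials
            pif = self.useFixture(PhysicalInterfaceTestFixtureGen(conn, 'ge-0/0/0'))
            lif = self.useFixture(LogicalInterfaceTestFixtureGen(
                conn, 'ge-0/0/0.100',
                parent_fixt=pif,                 # explicit parent avoids AmbiguousParentError
                logical_interface_vlan_tag=100,
                logical_interface_type='l2'))
            self.assertEqual(100, lif.getObj().logical_interface_vlan_tag)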
-
-class RouteTableTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.RouteTable`
- """
- def __init__(self, conn_drv, route_table_name=None, parent_fixt=None, auto_prop_val=False, routes=None, id_perms=None, display_name=None):
- '''
- Create RouteTableTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- route_table_name (str): Name of route_table
- parent_fixt (:class:`.ProjectTestFixtureGen`): Parent fixture
- routes (instance): instance of :class:`RouteTableType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(RouteTableTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not route_table_name:
- self._name = 'default-route-table'
- else:
- self._name = route_table_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.routes = routes
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_routes(self.routes or vnc_api.gen.resource_xsd.RouteTableType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(RouteTableTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(ProjectTestFixtureGen(self._conn_drv, 'default-project'))
-
- self._obj = vnc_api.RouteTable(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.route_table_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.routes = self.routes
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.route_table_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.route_table_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_virtual_network_back_refs():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_route_tables():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.route_tables.remove(child_obj)
- break
-
- self._conn_drv.route_table_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class RouteTableTestFixtureGen
-
-class PhysicalInterfaceTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.PhysicalInterface`
- """
- def __init__(self, conn_drv, physical_interface_name=None, parent_fixt=None, auto_prop_val=False, id_perms=None, display_name=None):
- '''
- Create PhysicalInterfaceTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- physical_interface_name (str): Name of physical_interface
- parent_fixt (:class:`.PhysicalRouterTestFixtureGen`): Parent fixture
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(PhysicalInterfaceTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not physical_interface_name:
- self._name = 'default-physical-interface'
- else:
- self._name = physical_interface_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(PhysicalInterfaceTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(PhysicalRouterTestFixtureGen(self._conn_drv, 'default-physical-router'))
-
- self._obj = vnc_api.PhysicalInterface(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.physical_interface_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.physical_interface_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.physical_interface_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_logical_interfaces():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_physical_interfaces():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.physical_interfaces.remove(child_obj)
- break
-
- self._conn_drv.physical_interface_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class PhysicalInterfaceTestFixtureGen
-
-class AccessControlListTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.AccessControlList`
- """
- def __init__(self, conn_drv, access_control_list_name=None, parent_fixt=None, auto_prop_val=False, access_control_list_entries=None, id_perms=None, display_name=None):
- '''
- Create AccessControlListTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- access_control_list_name (str): Name of access_control_list
- parent_fixt (:class:`.VirtualNetworkTestFixtureGen`): Parent fixture
- access_control_list_entries (instance): instance of :class:`AclEntriesType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(AccessControlListTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not access_control_list_name:
- self._name = 'default-access-control-list'
- else:
- self._name = access_control_list_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.access_control_list_entries = access_control_list_entries
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_access_control_list_entries(self.access_control_list_entries or vnc_api.gen.resource_xsd.AclEntriesType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(AccessControlListTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- raise AmbiguousParentError("[[u'default-domain', u'default-project', u'default-virtual-network'], [u'default-domain', u'default-project', u'default-security-group']]")
-
- self._obj = vnc_api.AccessControlList(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.access_control_list_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.access_control_list_entries = self.access_control_list_entries
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.access_control_list_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.access_control_list_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_access_control_lists():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.access_control_lists.remove(child_obj)
- break
-
- self._conn_drv.access_control_list_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class AccessControlListTestFixtureGen
-
-class AnalyticsNodeTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.AnalyticsNode`
- """
- def __init__(self, conn_drv, analytics_node_name=None, parent_fixt=None, auto_prop_val=False, analytics_node_ip_address=None, id_perms=None, display_name=None):
- '''
- Create AnalyticsNodeTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- analytics_node_name (str): Name of analytics_node
- parent_fixt (:class:`.GlobalSystemConfigTestFixtureGen`): Parent fixture
- analytics_node_ip_address (instance): instance of :class:`xsd:string`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(AnalyticsNodeTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not analytics_node_name:
- self._name = 'default-analytics-node'
- else:
- self._name = analytics_node_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.analytics_node_ip_address = analytics_node_ip_address
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_analytics_node_ip_address(self.analytics_node_ip_address or GeneratedsSuper.populate_string("analytics_node_ip_address"))
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(AnalyticsNodeTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(GlobalSystemConfigTestFixtureGen(self._conn_drv, 'default-global-system-config'))
-
- self._obj = vnc_api.AnalyticsNode(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.analytics_node_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.analytics_node_ip_address = self.analytics_node_ip_address
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.analytics_node_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.analytics_node_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_analytics_nodes():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.analytics_nodes.remove(child_obj)
- break
-
- self._conn_drv.analytics_node_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class AnalyticsNodeTestFixtureGen
-
-class VirtualDnsTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.VirtualDns`
- """
- def __init__(self, conn_drv, virtual_DNS_name=None, parent_fixt=None, auto_prop_val=False, virtual_DNS_data=None, id_perms=None, display_name=None):
- '''
- Create VirtualDnsTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- virtual_DNS_name (str): Name of virtual_DNS
- parent_fixt (:class:`.DomainTestFixtureGen`): Parent fixture
- virtual_DNS_data (instance): instance of :class:`VirtualDnsType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(VirtualDnsTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not virtual_DNS_name:
- self._name = 'default-virtual-DNS'
- else:
- self._name = virtual_DNS_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.virtual_DNS_data = virtual_DNS_data
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_virtual_DNS_data(self.virtual_DNS_data or vnc_api.gen.resource_xsd.VirtualDnsType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(VirtualDnsTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(DomainTestFixtureGen(self._conn_drv, 'default-domain'))
-
- self._obj = vnc_api.VirtualDns(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.virtual_DNS_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.virtual_DNS_data = self.virtual_DNS_data
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.virtual_DNS_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.virtual_DNS_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_network_ipam_back_refs():
- return
- if self._obj.get_virtual_DNS_records():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_virtual_DNSs():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.virtual_DNSs.remove(child_obj)
- break
-
- self._conn_drv.virtual_DNS_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class VirtualDnsTestFixtureGen
-
-class CustomerAttachmentTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.CustomerAttachment`
- """
- def __init__(self, conn_drv, customer_attachment_name=None, auto_prop_val=False, virtual_machine_interface_refs = None, floating_ip_refs = None, attachment_address=None, id_perms=None, display_name=None):
- '''
- Create CustomerAttachmentTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- customer_attachment_name (str): Name of customer_attachment
- virtual_machine_interface_refs (list): list of :class:`VirtualMachineInterface` objects
- floating_ip_refs (list): list of :class:`FloatingIp` objects
- attachment_address (instance): instance of :class:`AttachmentAddressType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(CustomerAttachmentTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not customer_attachment_name:
- self._name = 'default-customer-attachment'
- else:
- self._name = customer_attachment_name
- self._obj = None
- self._auto_prop_val = auto_prop_val
- if virtual_machine_interface_refs:
- for ln in virtual_machine_interface_refs:
- self.add_virtual_machine_interface (ln)
- if floating_ip_refs:
- for ln in floating_ip_refs:
- self.add_floating_ip (ln)
- self.attachment_address = attachment_address
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_virtual_machine_interfaces ():
- self.add_virtual_machine_interface (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_floating_ips ():
- self.add_floating_ip (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_virtual_machine_interface (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualMachineInterface` link to :class:`CustomerAttachment`
- Args:
- lo (:class:`VirtualMachineInterface`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_machine_interface (lo)
- if update_server:
- self._conn_drv.customer_attachment_update (self._obj)
-
- if add_link:
- self.add_link('virtual_machine_interface', cfixture.ConrtailLink('virtual_machine_interface', 'customer_attachment', 'virtual_machine_interface', ['ref'], lo))
- #end add_virtual_machine_interface_link
-
- def get_virtual_machine_interfaces (self):
- return self.get_links ('virtual_machine_interface')
- #end get_virtual_machine_interfaces
- def add_floating_ip (self, lo, update_server = True, add_link = True):
- '''
- add :class:`FloatingIp` link to :class:`CustomerAttachment`
- Args:
- lo (:class:`FloatingIp`): obj to link
- '''
- if self._obj:
- self._obj.add_floating_ip (lo)
- if update_server:
- self._conn_drv.customer_attachment_update (self._obj)
-
- if add_link:
- self.add_link('floating_ip', cfixture.ConrtailLink('floating_ip', 'customer_attachment', 'floating_ip', ['ref'], lo))
- #end add_floating_ip_link
-
- def get_floating_ips (self):
- return self.get_links ('floating_ip')
- #end get_floating_ips
-
- def populate (self):
- self._obj.set_attachment_address(self.attachment_address or vnc_api.gen.resource_xsd.AttachmentAddressType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(CustomerAttachmentTestFixtureGen, self).setUp()
- self._obj = vnc_api.CustomerAttachment(self._name)
- try:
- self._obj = self._conn_drv.customer_attachment_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.attachment_address = self.attachment_address
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.customer_attachment_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.customer_attachment_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- self._conn_drv.customer_attachment_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class CustomerAttachmentTestFixtureGen
-
-class ServiceApplianceSetTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.ServiceApplianceSet`
- """
- def __init__(self, conn_drv, service_appliance_set_name=None, parent_fixt=None, auto_prop_val=False, service_appliance_set_properties=None, service_appliance_driver=None, service_appliance_ha_mode=None, id_perms=None, display_name=None):
- '''
- Create ServiceApplianceSetTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- service_appliance_set_name (str): Name of service_appliance_set
- parent_fixt (:class:`.GlobalSystemConfigTestFixtureGen`): Parent fixture
- service_appliance_set_properties (instance): instance of :class:`KeyValuePairs`
- service_appliance_driver (instance): instance of :class:`xsd:string`
- service_appliance_ha_mode (instance): instance of :class:`xsd:string`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(ServiceApplianceSetTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not service_appliance_set_name:
- self._name = 'default-service-appliance-set'
- else:
- self._name = service_appliance_set_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.service_appliance_set_properties = service_appliance_set_properties
- self.service_appliance_driver = service_appliance_driver
- self.service_appliance_ha_mode = service_appliance_ha_mode
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_service_appliance_set_properties(self.service_appliance_set_properties or vnc_api.gen.resource_xsd.KeyValuePairs.populate())
- self._obj.set_service_appliance_driver(self.service_appliance_driver or GeneratedsSuper.populate_string("service_appliance_driver"))
- self._obj.set_service_appliance_ha_mode(self.service_appliance_ha_mode or GeneratedsSuper.populate_string("service_appliance_ha_mode"))
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(ServiceApplianceSetTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(GlobalSystemConfigTestFixtureGen(self._conn_drv, 'default-global-system-config'))
-
- self._obj = vnc_api.ServiceApplianceSet(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.service_appliance_set_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.service_appliance_set_properties = self.service_appliance_set_properties
- self._obj.service_appliance_driver = self.service_appliance_driver
- self._obj.service_appliance_ha_mode = self.service_appliance_ha_mode
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.service_appliance_set_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.service_appliance_set_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_loadbalancer_pool_back_refs():
- return
- if self._obj.get_service_appliances():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_service_appliance_sets():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.service_appliance_sets.remove(child_obj)
- break
-
- self._conn_drv.service_appliance_set_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class ServiceApplianceSetTestFixtureGen
-
-class ConfigNodeTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.ConfigNode`
- """
- def __init__(self, conn_drv, config_node_name=None, parent_fixt=None, auto_prop_val=False, config_node_ip_address=None, id_perms=None, display_name=None):
- '''
- Create ConfigNodeTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- config_node_name (str): Name of config_node
- parent_fixt (:class:`.GlobalSystemConfigTestFixtureGen`): Parent fixture
- config_node_ip_address (instance): instance of :class:`xsd:string`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(ConfigNodeTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not config_node_name:
- self._name = 'default-config-node'
- else:
- self._name = config_node_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.config_node_ip_address = config_node_ip_address
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_config_node_ip_address(self.config_node_ip_address or GeneratedsSuper.populate_string("config_node_ip_address"))
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(ConfigNodeTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(GlobalSystemConfigTestFixtureGen(self._conn_drv, 'default-global-system-config'))
-
- self._obj = vnc_api.ConfigNode(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.config_node_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.config_node_ip_address = self.config_node_ip_address
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.config_node_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.config_node_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_config_nodes():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.config_nodes.remove(child_obj)
- break
-
- self._conn_drv.config_node_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class ConfigNodeTestFixtureGen
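For context, a minimal sketch of how a generated fixture such as the one above is typically driven, assuming a reachable API server with default credentials and that the class is importable from the deleted Testcases module (both are assumptions, not part of this change):

# Hypothetical usage sketch -- connection details and import path are assumptions.
import testtools
from vnc_api import vnc_api

class ConfigNodeSmokeTest(testtools.TestCase):
    def test_config_node_fixture(self):
        conn = vnc_api.VncApi()  # assumes an API server reachable with defaults
        # useFixture() runs setUp() now and schedules cleanUp() at test teardown
        fixt = self.useFixture(ConfigNodeTestFixtureGen(
            conn, 'cn1', config_node_ip_address='10.0.0.5'))
        obj = fixt.getObj()  # object read back with server-allocated values
        self.assertEqual('cn1', obj.get_fq_name()[-1])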
-
-class QosQueueTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.QosQueue`
- """
- def __init__(self, conn_drv, qos_queue_name=None, parent_fixt=None, auto_prop_val=False, min_bandwidth=None, max_bandwidth=None, id_perms=None, display_name=None):
- '''
- Create QosQueueTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- qos_queue_name (str): Name of qos_queue
- parent_fixt (:class:`.ProjectTestFixtureGen`): Parent fixture
- min_bandwidth (instance): instance of :class:`xsd:integer`
- max_bandwidth (instance): instance of :class:`xsd:integer`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(QosQueueTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not qos_queue_name:
- self._name = 'default-qos-queue'
- else:
- self._name = qos_queue_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.min_bandwidth = min_bandwidth
- self.max_bandwidth = max_bandwidth
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_min_bandwidth(self.min_bandwidth or GeneratedsSuper.populate_integer("min_bandwidth"))
- self._obj.set_max_bandwidth(self.max_bandwidth or GeneratedsSuper.populate_integer("max_bandwidth"))
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(QosQueueTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(ProjectTestFixtureGen(self._conn_drv, 'default-project'))
-
- self._obj = vnc_api.QosQueue(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.qos_queue_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.min_bandwidth = self.min_bandwidth
- self._obj.max_bandwidth = self.max_bandwidth
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.qos_queue_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.qos_queue_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_qos_forwarding_class_back_refs():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_qos_queues():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.qos_queues.remove(child_obj)
- break
-
- self._conn_drv.qos_queue_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class QosQueueTestFixtureGen
-
-class VirtualMachineTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.VirtualMachine`
- """
- def __init__(self, conn_drv, virtual_machine_name=None, auto_prop_val=False, service_instance_refs = None, id_perms=None, display_name=None):
- '''
- Create VirtualMachineTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
-            virtual_machine_name (str): Name of virtual_machine
- service_instance (list): list of :class:`ServiceInstance` type
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(VirtualMachineTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not virtual_machine_name:
- self._name = 'default-virtual-machine'
- else:
- self._name = virtual_machine_name
- self._obj = None
- self._auto_prop_val = auto_prop_val
- if service_instance_refs:
- for ln in service_instance_refs:
- self.add_service_instance (ln)
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_service_instances ():
- self.add_service_instance (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_service_instance (self, lo, update_server = True, add_link = True):
- '''
- add :class:`ServiceInstance` link to :class:`VirtualMachine`
- Args:
- lo (:class:`ServiceInstance`): obj to link
- '''
- if self._obj:
- self._obj.add_service_instance (lo)
- if update_server:
- self._conn_drv.virtual_machine_update (self._obj)
-
- if add_link:
- self.add_link('service_instance', cfixture.ConrtailLink('service_instance', 'virtual_machine', 'service_instance', ['ref', 'derived'], lo))
- #end add_service_instance_link
-
- def get_service_instances (self):
- return self.get_links ('service_instance')
- #end get_service_instances
-
- def populate (self):
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(VirtualMachineTestFixtureGen, self).setUp()
- self._obj = vnc_api.VirtualMachine(self._name)
- try:
- self._obj = self._conn_drv.virtual_machine_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.virtual_machine_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.virtual_machine_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_virtual_machine_interface_back_refs() or self._obj.get_virtual_router_back_refs():
- return
- if self._obj.get_virtual_machine_interfaces():
- return
- self._conn_drv.virtual_machine_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class VirtualMachineTestFixtureGen
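A sketch of passing object refs through the add_service_instance() helper shown above; conn is the VncApi handle from the earlier sketch and the service-instance fq_name is purely illustrative:

# Hypothetical sketch -- assumes a service instance with this fq_name already
# exists on the server.
vm_fixt = self.useFixture(VirtualMachineTestFixtureGen(conn, 'vm-1'))
si_obj = conn.service_instance_read(
    fq_name=['default-domain', 'default-project', 'my-service-instance'])
# add_service_instance() also pushes the update to the server by default
vm_fixt.add_service_instance(si_obj)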
-
-class InterfaceRouteTableTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.InterfaceRouteTable`
- """
- def __init__(self, conn_drv, interface_route_table_name=None, parent_fixt=None, auto_prop_val=False, interface_route_table_routes=None, id_perms=None, display_name=None):
- '''
- Create InterfaceRouteTableTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- interface_route_table_name (str): Name of interface_route_table
- parent_fixt (:class:`.ProjectTestFixtureGen`): Parent fixture
- interface_route_table_routes (instance): instance of :class:`RouteTableType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(InterfaceRouteTableTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not interface_route_table_name:
- self._name = 'default-interface-route-table'
- else:
- self._name = interface_route_table_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.interface_route_table_routes = interface_route_table_routes
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_interface_route_table_routes(self.interface_route_table_routes or vnc_api.gen.resource_xsd.RouteTableType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(InterfaceRouteTableTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(ProjectTestFixtureGen(self._conn_drv, 'default-project'))
-
- self._obj = vnc_api.InterfaceRouteTable(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.interface_route_table_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.interface_route_table_routes = self.interface_route_table_routes
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.interface_route_table_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.interface_route_table_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_virtual_machine_interface_back_refs():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_interface_route_tables():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.interface_route_tables.remove(child_obj)
- break
-
- self._conn_drv.interface_route_table_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class InterfaceRouteTableTestFixtureGen
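A sketch of supplying a property object explicitly instead of letting auto_prop_val generate one; the RouteType field names below are assumptions about the vnc_api schema, not taken from this diff:

# Hypothetical sketch -- RouteType(prefix=..., next_hop=...) is assumed schema usage.
from vnc_api.gen import resource_xsd

routes = resource_xsd.RouteTableType(route=[
    resource_xsd.RouteType(prefix='10.10.0.0/16', next_hop='192.168.0.1')])
irt_fixt = self.useFixture(InterfaceRouteTableTestFixtureGen(
    conn, 'irt-1', interface_route_table_routes=routes))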
-
-class ServiceTemplateTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.ServiceTemplate`
- """
- def __init__(self, conn_drv, service_template_name=None, parent_fixt=None, auto_prop_val=False, service_template_properties=None, id_perms=None, display_name=None):
- '''
- Create ServiceTemplateTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- service_template_name (str): Name of service_template
- parent_fixt (:class:`.DomainTestFixtureGen`): Parent fixture
- service_template_properties (instance): instance of :class:`ServiceTemplateType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(ServiceTemplateTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not service_template_name:
- self._name = 'default-service-template'
- else:
- self._name = service_template_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.service_template_properties = service_template_properties
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_service_template_properties(self.service_template_properties or vnc_api.gen.resource_xsd.ServiceTemplateType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(ServiceTemplateTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(DomainTestFixtureGen(self._conn_drv, 'default-domain'))
-
- self._obj = vnc_api.ServiceTemplate(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.service_template_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.service_template_properties = self.service_template_properties
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.service_template_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.service_template_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_service_instance_back_refs():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_service_templates():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.service_templates.remove(child_obj)
- break
-
- self._conn_drv.service_template_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class ServiceTemplateTestFixtureGen
-
-class VirtualIpTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.VirtualIp`
- """
- def __init__(self, conn_drv, virtual_ip_name=None, parent_fixt=None, auto_prop_val=False, loadbalancer_pool_refs = None, virtual_machine_interface_refs = None, virtual_ip_properties=None, id_perms=None, display_name=None):
- '''
- Create VirtualIpTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- virtual_ip_name (str): Name of virtual_ip
- parent_fixt (:class:`.ProjectTestFixtureGen`): Parent fixture
- loadbalancer_pool (list): list of :class:`LoadbalancerPool` type
- virtual_machine_interface (list): list of :class:`VirtualMachineInterface` type
- virtual_ip_properties (instance): instance of :class:`VirtualIpType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(VirtualIpTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not virtual_ip_name:
- self._name = 'default-virtual-ip'
- else:
- self._name = virtual_ip_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- if loadbalancer_pool_refs:
- for ln in loadbalancer_pool_refs:
- self.add_loadbalancer_pool (ln)
- if virtual_machine_interface_refs:
- for ln in virtual_machine_interface_refs:
- self.add_virtual_machine_interface (ln)
- self.virtual_ip_properties = virtual_ip_properties
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_loadbalancer_pools ():
- self.add_loadbalancer_pool (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_virtual_machine_interfaces ():
- self.add_virtual_machine_interface (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_loadbalancer_pool (self, lo, update_server = True, add_link = True):
- '''
- add :class:`LoadbalancerPool` link to :class:`VirtualIp`
- Args:
- lo (:class:`LoadbalancerPool`): obj to link
- '''
- if self._obj:
- self._obj.add_loadbalancer_pool (lo)
- if update_server:
- self._conn_drv.virtual_ip_update (self._obj)
-
- if add_link:
- self.add_link('loadbalancer_pool', cfixture.ConrtailLink('loadbalancer_pool', 'virtual_ip', 'loadbalancer_pool', ['ref'], lo))
- #end add_loadbalancer_pool_link
-
- def get_loadbalancer_pools (self):
- return self.get_links ('loadbalancer_pool')
- #end get_loadbalancer_pools
- def add_virtual_machine_interface (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualMachineInterface` link to :class:`VirtualIp`
- Args:
- lo (:class:`VirtualMachineInterface`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_machine_interface (lo)
- if update_server:
- self._conn_drv.virtual_ip_update (self._obj)
-
- if add_link:
- self.add_link('virtual_machine_interface', cfixture.ConrtailLink('virtual_machine_interface', 'virtual_ip', 'virtual_machine_interface', ['ref'], lo))
- #end add_virtual_machine_interface_link
-
- def get_virtual_machine_interfaces (self):
- return self.get_links ('virtual_machine_interface')
- #end get_virtual_machine_interfaces
-
- def populate (self):
- self._obj.set_virtual_ip_properties(self.virtual_ip_properties or vnc_api.gen.resource_xsd.VirtualIpType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(VirtualIpTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(ProjectTestFixtureGen(self._conn_drv, 'default-project'))
-
- self._obj = vnc_api.VirtualIp(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.virtual_ip_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.virtual_ip_properties = self.virtual_ip_properties
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.virtual_ip_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.virtual_ip_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_virtual_ips():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.virtual_ips.remove(child_obj)
- break
-
- self._conn_drv.virtual_ip_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class VirtualIpTestFixtureGen
-
-class LoadbalancerMemberTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.LoadbalancerMember`
- """
- def __init__(self, conn_drv, loadbalancer_member_name=None, parent_fixt=None, auto_prop_val=False, loadbalancer_member_properties=None, id_perms=None, display_name=None):
- '''
- Create LoadbalancerMemberTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- loadbalancer_member_name (str): Name of loadbalancer_member
- parent_fixt (:class:`.LoadbalancerPoolTestFixtureGen`): Parent fixture
- loadbalancer_member_properties (instance): instance of :class:`LoadbalancerMemberType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(LoadbalancerMemberTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not loadbalancer_member_name:
- self._name = 'default-loadbalancer-member'
- else:
- self._name = loadbalancer_member_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.loadbalancer_member_properties = loadbalancer_member_properties
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_loadbalancer_member_properties(self.loadbalancer_member_properties or vnc_api.gen.resource_xsd.LoadbalancerMemberType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(LoadbalancerMemberTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(LoadbalancerPoolTestFixtureGen(self._conn_drv, 'default-loadbalancer-pool'))
-
- self._obj = vnc_api.LoadbalancerMember(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.loadbalancer_member_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.loadbalancer_member_properties = self.loadbalancer_member_properties
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.loadbalancer_member_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.loadbalancer_member_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_loadbalancer_members():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.loadbalancer_members.remove(child_obj)
- break
-
- self._conn_drv.loadbalancer_member_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class LoadbalancerMemberTestFixtureGen
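Because the default parent here is itself a non-root fixture, omitting parent_fixt makes setUp() build the whole default ancestry (loadbalancer pool, project, domain) through nested useFixture() calls. A minimal sketch under the same assumptions as the first example:

# Hypothetical sketch -- with no parent_fixt, the default-loadbalancer-pool fixture
# (and its own default parents) are created automatically by setUp().
lbm_fixt = self.useFixture(LoadbalancerMemberTestFixtureGen(conn, 'member-1'))
# fq_name reflects the chained default parents
print(lbm_fixt.getObj().get_fq_name())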
-
-class SecurityGroupTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.SecurityGroup`
- """
- def __init__(self, conn_drv, security_group_name=None, parent_fixt=None, auto_prop_val=False, security_group_id=None, configured_security_group_id=None, security_group_entries=None, id_perms=None, display_name=None):
- '''
- Create SecurityGroupTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- security_group_name (str): Name of security_group
- parent_fixt (:class:`.ProjectTestFixtureGen`): Parent fixture
- security_group_id (instance): instance of :class:`xsd:string`
- configured_security_group_id (instance): instance of :class:`xsd:integer`
- security_group_entries (instance): instance of :class:`PolicyEntriesType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(SecurityGroupTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not security_group_name:
- self._name = 'default-security-group'
- else:
- self._name = security_group_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.security_group_id = security_group_id
- self.configured_security_group_id = configured_security_group_id
- self.security_group_entries = security_group_entries
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_security_group_id(self.security_group_id or GeneratedsSuper.populate_string("security_group_id"))
- self._obj.set_configured_security_group_id(self.configured_security_group_id or GeneratedsSuper.populate_integer("configured_security_group_id"))
- self._obj.set_security_group_entries(self.security_group_entries or vnc_api.gen.resource_xsd.PolicyEntriesType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(SecurityGroupTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(ProjectTestFixtureGen(self._conn_drv, 'default-project'))
-
- self._obj = vnc_api.SecurityGroup(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.security_group_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.security_group_id = self.security_group_id
- self._obj.configured_security_group_id = self.configured_security_group_id
- self._obj.security_group_entries = self.security_group_entries
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.security_group_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.security_group_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_virtual_machine_interface_back_refs():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_security_groups():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.security_groups.remove(child_obj)
- break
-
- self._conn_drv.security_group_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class SecurityGroupTestFixtureGen
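A sketch of auto_prop_val: when it is set, setUp() calls populate(), which fills every property that was not passed in with generated values:

# Hypothetical sketch -- all unset properties get generated values via populate().
sg_fixt = self.useFixture(SecurityGroupTestFixtureGen(conn, 'sg-1',
                                                      auto_prop_val=True))
print(sg_fixt.getObj().get_security_group_entries())  # populated PolicyEntriesType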
-
-class ProviderAttachmentTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.ProviderAttachment`
- """
- def __init__(self, conn_drv, provider_attachment_name=None, auto_prop_val=False, virtual_router_refs = None, id_perms=None, display_name=None):
- '''
- Create ProviderAttachmentTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
-            provider_attachment_name (str): Name of provider_attachment
- virtual_router (list): list of :class:`VirtualRouter` type
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(ProviderAttachmentTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not provider_attachment_name:
- self._name = 'default-provider-attachment'
- else:
- self._name = provider_attachment_name
- self._obj = None
- self._auto_prop_val = auto_prop_val
- if virtual_router_refs:
- for ln in virtual_router_refs:
- self.add_virtual_router (ln)
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_virtual_routers ():
- self.add_virtual_router (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_virtual_router (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualRouter` link to :class:`ProviderAttachment`
- Args:
- lo (:class:`VirtualRouter`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_router (lo)
- if update_server:
- self._conn_drv.provider_attachment_update (self._obj)
-
- if add_link:
- self.add_link('virtual_router', cfixture.ConrtailLink('virtual_router', 'provider_attachment', 'virtual_router', ['ref'], lo))
- #end add_virtual_router_link
-
- def get_virtual_routers (self):
- return self.get_links ('virtual_router')
- #end get_virtual_routers
-
- def populate (self):
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(ProviderAttachmentTestFixtureGen, self).setUp()
- self._obj = vnc_api.ProviderAttachment(self._name)
- try:
- self._obj = self._conn_drv.provider_attachment_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.provider_attachment_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.provider_attachment_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- self._conn_drv.provider_attachment_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class ProviderAttachmentTestFixtureGen
-
-class VirtualMachineInterfaceTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.VirtualMachineInterface`
- """
- def __init__(self, conn_drv, virtual_machine_interface_name=None, parent_fixt=None, auto_prop_val=False, qos_forwarding_class_refs = None, security_group_refs = None, virtual_machine_interface_refs = None, virtual_machine_refs = None, virtual_network_refs = None, routing_instance_ref_infos = None, interface_route_table_refs = None, virtual_machine_interface_mac_addresses=None, virtual_machine_interface_dhcp_option_list=None, virtual_machine_interface_host_routes=None, virtual_machine_interface_allowed_address_pairs=None, vrf_assign_table=None, virtual_machine_interface_device_owner=None, virtual_machine_interface_properties=None, id_perms=None, display_name=None):
- '''
- Create VirtualMachineInterfaceTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- virtual_machine_interface_name (str): Name of virtual_machine_interface
- parent_fixt (:class:`.VirtualMachineTestFixtureGen`): Parent fixture
- qos_forwarding_class (list): list of :class:`QosForwardingClass` type
- security_group (list): list of :class:`SecurityGroup` type
- virtual_machine_interface (list): list of :class:`VirtualMachineInterface` type
- virtual_machine (list): list of :class:`VirtualMachine` type
- virtual_network (list): list of :class:`VirtualNetwork` type
-            routing_instance (list): list of tuple (:class:`RoutingInstance`, :class:`PolicyBasedForwardingRuleType`) type
- interface_route_table (list): list of :class:`InterfaceRouteTable` type
- virtual_machine_interface_mac_addresses (instance): instance of :class:`MacAddressesType`
- virtual_machine_interface_dhcp_option_list (instance): instance of :class:`DhcpOptionsListType`
- virtual_machine_interface_host_routes (instance): instance of :class:`RouteTableType`
- virtual_machine_interface_allowed_address_pairs (instance): instance of :class:`AllowedAddressPairs`
- vrf_assign_table (instance): instance of :class:`VrfAssignTableType`
- virtual_machine_interface_device_owner (instance): instance of :class:`xsd:string`
- virtual_machine_interface_properties (instance): instance of :class:`VirtualMachineInterfacePropertiesType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(VirtualMachineInterfaceTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not virtual_machine_interface_name:
- self._name = 'default-virtual-machine-interface'
- else:
- self._name = virtual_machine_interface_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- if qos_forwarding_class_refs:
- for ln in qos_forwarding_class_refs:
- self.add_qos_forwarding_class (ln)
- if security_group_refs:
- for ln in security_group_refs:
- self.add_security_group (ln)
- if virtual_machine_interface_refs:
- for ln in virtual_machine_interface_refs:
- self.add_virtual_machine_interface (ln)
- if virtual_machine_refs:
- for ln in virtual_machine_refs:
- self.add_virtual_machine (ln)
- if virtual_network_refs:
- for ln in virtual_network_refs:
- self.add_virtual_network (ln)
- if routing_instance_ref_infos:
- for ln, ref in routing_instance_ref_infos:
- self.add_routing_instance (ln, ref)
- if interface_route_table_refs:
- for ln in interface_route_table_refs:
- self.add_interface_route_table (ln)
- self.virtual_machine_interface_mac_addresses = virtual_machine_interface_mac_addresses
- self.virtual_machine_interface_dhcp_option_list = virtual_machine_interface_dhcp_option_list
- self.virtual_machine_interface_host_routes = virtual_machine_interface_host_routes
- self.virtual_machine_interface_allowed_address_pairs = virtual_machine_interface_allowed_address_pairs
- self.vrf_assign_table = vrf_assign_table
- self.virtual_machine_interface_device_owner = virtual_machine_interface_device_owner
- self.virtual_machine_interface_properties = virtual_machine_interface_properties
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_qos_forwarding_classs ():
- self.add_qos_forwarding_class (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_security_groups ():
- self.add_security_group (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_virtual_machine_interfaces ():
- self.add_virtual_machine_interface (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_virtual_machines ():
- self.add_virtual_machine (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_virtual_networks ():
- self.add_virtual_network (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_routing_instances ():
- self.add_routing_instance (*ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_interface_route_tables ():
- self.add_interface_route_table (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_qos_forwarding_class (self, lo, update_server = True, add_link = True):
- '''
- add :class:`QosForwardingClass` link to :class:`VirtualMachineInterface`
- Args:
- lo (:class:`QosForwardingClass`): obj to link
- '''
- if self._obj:
- self._obj.add_qos_forwarding_class (lo)
- if update_server:
- self._conn_drv.virtual_machine_interface_update (self._obj)
-
- if add_link:
- self.add_link('qos_forwarding_class', cfixture.ConrtailLink('qos_forwarding_class', 'virtual_machine_interface', 'qos_forwarding_class', ['ref'], lo))
- #end add_qos_forwarding_class_link
-
- def get_qos_forwarding_classs (self):
- return self.get_links ('qos_forwarding_class')
- #end get_qos_forwarding_classs
- def add_security_group (self, lo, update_server = True, add_link = True):
- '''
- add :class:`SecurityGroup` link to :class:`VirtualMachineInterface`
- Args:
- lo (:class:`SecurityGroup`): obj to link
- '''
- if self._obj:
- self._obj.add_security_group (lo)
- if update_server:
- self._conn_drv.virtual_machine_interface_update (self._obj)
-
- if add_link:
- self.add_link('security_group', cfixture.ConrtailLink('security_group', 'virtual_machine_interface', 'security_group', ['ref'], lo))
- #end add_security_group_link
-
- def get_security_groups (self):
- return self.get_links ('security_group')
- #end get_security_groups
- def add_virtual_machine_interface (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualMachineInterface` link to :class:`VirtualMachineInterface`
- Args:
- lo (:class:`VirtualMachineInterface`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_machine_interface (lo)
- if update_server:
- self._conn_drv.virtual_machine_interface_update (self._obj)
-
- if add_link:
- self.add_link('virtual_machine_interface', cfixture.ConrtailLink('virtual_machine_interface', 'virtual_machine_interface', 'virtual_machine_interface', ['ref'], lo))
- #end add_virtual_machine_interface_link
-
- def get_virtual_machine_interfaces (self):
- return self.get_links ('virtual_machine_interface')
- #end get_virtual_machine_interfaces
- def add_virtual_machine (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualMachine` link to :class:`VirtualMachineInterface`
- Args:
- lo (:class:`VirtualMachine`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_machine (lo)
- if update_server:
- self._conn_drv.virtual_machine_interface_update (self._obj)
-
- if add_link:
- self.add_link('virtual_machine', cfixture.ConrtailLink('virtual_machine', 'virtual_machine_interface', 'virtual_machine', ['ref'], lo))
- #end add_virtual_machine_link
-
- def get_virtual_machines (self):
- return self.get_links ('virtual_machine')
- #end get_virtual_machines
- def add_virtual_network (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualNetwork` link to :class:`VirtualMachineInterface`
- Args:
- lo (:class:`VirtualNetwork`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_network (lo)
- if update_server:
- self._conn_drv.virtual_machine_interface_update (self._obj)
-
- if add_link:
- self.add_link('virtual_network', cfixture.ConrtailLink('virtual_network', 'virtual_machine_interface', 'virtual_network', ['ref'], lo))
- #end add_virtual_network_link
-
- def get_virtual_networks (self):
- return self.get_links ('virtual_network')
- #end get_virtual_networks
- def add_routing_instance (self, lo, ref, update_server = True, add_link = True):
- '''
- add :class:`RoutingInstance` link to :class:`VirtualMachineInterface`
- Args:
- lo (:class:`RoutingInstance`): obj to link
- ref (:class:`PolicyBasedForwardingRuleType`): property of the link object
- '''
- if self._obj:
- self._obj.add_routing_instance (lo, ref)
- if update_server:
- self._conn_drv.virtual_machine_interface_update (self._obj)
-
- if add_link:
- self.add_link('routing_instance', cfixture.ConrtailLink('routing_instance', 'virtual_machine_interface', 'routing_instance', ['ref'], (lo, ref)))
- #end add_routing_instance_link
-
- def get_routing_instances (self):
- return self.get_links ('routing_instance')
- #end get_routing_instances
- def add_interface_route_table (self, lo, update_server = True, add_link = True):
- '''
- add :class:`InterfaceRouteTable` link to :class:`VirtualMachineInterface`
- Args:
- lo (:class:`InterfaceRouteTable`): obj to link
- '''
- if self._obj:
- self._obj.add_interface_route_table (lo)
- if update_server:
- self._conn_drv.virtual_machine_interface_update (self._obj)
-
- if add_link:
- self.add_link('interface_route_table', cfixture.ConrtailLink('interface_route_table', 'virtual_machine_interface', 'interface_route_table', ['ref'], lo))
- #end add_interface_route_table_link
-
- def get_interface_route_tables (self):
- return self.get_links ('interface_route_table')
- #end get_interface_route_tables
-
- def populate (self):
- self._obj.set_virtual_machine_interface_mac_addresses(self.virtual_machine_interface_mac_addresses or vnc_api.gen.resource_xsd.MacAddressesType.populate())
- self._obj.set_virtual_machine_interface_dhcp_option_list(self.virtual_machine_interface_dhcp_option_list or vnc_api.gen.resource_xsd.DhcpOptionsListType.populate())
- self._obj.set_virtual_machine_interface_host_routes(self.virtual_machine_interface_host_routes or vnc_api.gen.resource_xsd.RouteTableType.populate())
- self._obj.set_virtual_machine_interface_allowed_address_pairs(self.virtual_machine_interface_allowed_address_pairs or vnc_api.gen.resource_xsd.AllowedAddressPairs.populate())
- self._obj.set_vrf_assign_table(self.vrf_assign_table or vnc_api.gen.resource_xsd.VrfAssignTableType.populate())
- self._obj.set_virtual_machine_interface_device_owner(self.virtual_machine_interface_device_owner or GeneratedsSuper.populate_string("virtual_machine_interface_device_owner"))
- self._obj.set_virtual_machine_interface_properties(self.virtual_machine_interface_properties or vnc_api.gen.resource_xsd.VirtualMachineInterfacePropertiesType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(VirtualMachineInterfaceTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- raise AmbiguousParentError("[[u'default-virtual-machine'], [u'default-domain', u'default-project']]")
-
- self._obj = vnc_api.VirtualMachineInterface(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.virtual_machine_interface_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.virtual_machine_interface_mac_addresses = self.virtual_machine_interface_mac_addresses
- self._obj.virtual_machine_interface_dhcp_option_list = self.virtual_machine_interface_dhcp_option_list
- self._obj.virtual_machine_interface_host_routes = self.virtual_machine_interface_host_routes
- self._obj.virtual_machine_interface_allowed_address_pairs = self.virtual_machine_interface_allowed_address_pairs
- self._obj.vrf_assign_table = self.vrf_assign_table
- self._obj.virtual_machine_interface_device_owner = self.virtual_machine_interface_device_owner
- self._obj.virtual_machine_interface_properties = self.virtual_machine_interface_properties
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.virtual_machine_interface_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.virtual_machine_interface_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_virtual_machine_interface_back_refs() or self._obj.get_instance_ip_back_refs() or self._obj.get_subnet_back_refs() or self._obj.get_floating_ip_back_refs() or self._obj.get_logical_interface_back_refs() or self._obj.get_customer_attachment_back_refs() or self._obj.get_logical_router_back_refs() or self._obj.get_loadbalancer_pool_back_refs() or self._obj.get_virtual_ip_back_refs():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_virtual_machine_interfaces():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.virtual_machine_interfaces.remove(child_obj)
- break
-
- self._conn_drv.virtual_machine_interface_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class VirtualMachineInterfaceTestFixtureGen
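Because this resource has more than one possible parent type, setUp() raises AmbiguousParentError when parent_fixt is omitted, so a parent fixture must be passed explicitly. A sketch under the same assumptions as the earlier examples, with illustrative names:

# Hypothetical sketch -- parent and network names are illustrative.
vm_fixt = self.useFixture(VirtualMachineTestFixtureGen(conn, 'vm-1'))
vn_fixt = self.useFixture(VirtualNetworkTestFixtureGen(conn, 'vn-1'))
vmi_fixt = self.useFixture(VirtualMachineInterfaceTestFixtureGen(
    conn, 'vmi-1', parent_fixt=vm_fixt,
    virtual_network_refs=[vn_fixt.getObj()]))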
-
-class LoadbalancerHealthmonitorTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.LoadbalancerHealthmonitor`
- """
- def __init__(self, conn_drv, loadbalancer_healthmonitor_name=None, parent_fixt=None, auto_prop_val=False, loadbalancer_healthmonitor_properties=None, id_perms=None, display_name=None):
- '''
- Create LoadbalancerHealthmonitorTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- loadbalancer_healthmonitor_name (str): Name of loadbalancer_healthmonitor
- parent_fixt (:class:`.ProjectTestFixtureGen`): Parent fixture
- loadbalancer_healthmonitor_properties (instance): instance of :class:`LoadbalancerHealthmonitorType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(LoadbalancerHealthmonitorTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not loadbalancer_healthmonitor_name:
- self._name = 'default-loadbalancer-healthmonitor'
- else:
- self._name = loadbalancer_healthmonitor_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.loadbalancer_healthmonitor_properties = loadbalancer_healthmonitor_properties
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_loadbalancer_healthmonitor_properties(self.loadbalancer_healthmonitor_properties or vnc_api.gen.resource_xsd.LoadbalancerHealthmonitorType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(LoadbalancerHealthmonitorTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(ProjectTestFixtureGen(self._conn_drv, 'default-project'))
-
- self._obj = vnc_api.LoadbalancerHealthmonitor(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.loadbalancer_healthmonitor_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.loadbalancer_healthmonitor_properties = self.loadbalancer_healthmonitor_properties
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.loadbalancer_healthmonitor_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.loadbalancer_healthmonitor_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_loadbalancer_pool_back_refs():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_loadbalancer_healthmonitors():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.loadbalancer_healthmonitors.remove(child_obj)
- break
-
- self._conn_drv.loadbalancer_healthmonitor_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class LoadbalancerHealthmonitorTestFixtureGen
-
-class VirtualNetworkTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.VirtualNetwork`
- """
- def __init__(self, conn_drv, virtual_network_name=None, parent_fixt=None, auto_prop_val=False, qos_forwarding_class_refs = None, network_ipam_ref_infos = None, network_policy_ref_infos = None, route_table_refs = None, virtual_network_properties=None, virtual_network_network_id=None, route_target_list=None, router_external=None, is_shared=None, external_ipam=None, flood_unknown_unicast=None, id_perms=None, display_name=None):
- '''
- Create VirtualNetworkTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (eg. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc)
-
- Kwargs:
- virtual_network_name (str): Name of virtual_network
- parent_fixt (:class:`.ProjectTestFixtureGen`): Parent fixture
- qos_forwarding_class (list): list of :class:`QosForwardingClass` type
-            network_ipam (list): list of tuple (:class:`NetworkIpam`, :class:`VnSubnetsType`) type
-            network_policy (list): list of tuple (:class:`NetworkPolicy`, :class:`VirtualNetworkPolicyType`) type
- route_table (list): list of :class:`RouteTable` type
- virtual_network_properties (instance): instance of :class:`VirtualNetworkType`
- virtual_network_network_id (instance): instance of :class:`xsd:integer`
- route_target_list (instance): instance of :class:`RouteTargetList`
- router_external (instance): instance of :class:`xsd:boolean`
- is_shared (instance): instance of :class:`xsd:boolean`
- external_ipam (instance): instance of :class:`xsd:boolean`
- flood_unknown_unicast (instance): instance of :class:`xsd:boolean`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(VirtualNetworkTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not virtual_network_name:
- self._name = 'default-virtual-network'
- else:
- self._name = virtual_network_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- if qos_forwarding_class_refs:
- for ln in qos_forwarding_class_refs:
- self.add_qos_forwarding_class (ln)
- if network_ipam_ref_infos:
- for ln, ref in network_ipam_ref_infos:
- self.add_network_ipam (ln, ref)
- if network_policy_ref_infos:
- for ln, ref in network_policy_ref_infos:
- self.add_network_policy (ln, ref)
- if route_table_refs:
- for ln in route_table_refs:
- self.add_route_table (ln)
- self.virtual_network_properties = virtual_network_properties
- self.virtual_network_network_id = virtual_network_network_id
- self.route_target_list = route_target_list
- self.router_external = router_external
- self.is_shared = is_shared
- self.external_ipam = external_ipam
- self.flood_unknown_unicast = flood_unknown_unicast
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_qos_forwarding_classs ():
- self.add_qos_forwarding_class (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_network_ipams ():
- self.add_network_ipam (*ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_network_policys ():
- self.add_network_policy (*ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_route_tables ():
- self.add_route_table (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_qos_forwarding_class (self, lo, update_server = True, add_link = True):
- '''
- add :class:`QosForwardingClass` link to :class:`VirtualNetwork`
- Args:
- lo (:class:`QosForwardingClass`): obj to link
- '''
- if self._obj:
- self._obj.add_qos_forwarding_class (lo)
- if update_server:
- self._conn_drv.virtual_network_update (self._obj)
-
- if add_link:
- self.add_link('qos_forwarding_class', cfixture.ConrtailLink('qos_forwarding_class', 'virtual_network', 'qos_forwarding_class', ['ref'], lo))
- #end add_qos_forwarding_class_link
-
- def get_qos_forwarding_classs (self):
- return self.get_links ('qos_forwarding_class')
- #end get_qos_forwarding_classs
- def add_network_ipam (self, lo, ref, update_server = True, add_link = True):
- '''
- add :class:`NetworkIpam` link to :class:`VirtualNetwork`
- Args:
- lo (:class:`NetworkIpam`): obj to link
- ref (:class:`VnSubnetsType`): property of the link object
- '''
- if self._obj:
- self._obj.add_network_ipam (lo, ref)
- if update_server:
- self._conn_drv.virtual_network_update (self._obj)
-
- if add_link:
- self.add_link('network_ipam', cfixture.ConrtailLink('network_ipam', 'virtual_network', 'network_ipam', ['ref'], (lo, ref)))
- #end add_network_ipam_link
-
- def get_network_ipams (self):
- return self.get_links ('network_ipam')
- #end get_network_ipams
- def add_network_policy (self, lo, ref, update_server = True, add_link = True):
- '''
- add :class:`NetworkPolicy` link to :class:`VirtualNetwork`
- Args:
- lo (:class:`NetworkPolicy`): obj to link
- ref (:class:`VirtualNetworkPolicyType`): property of the link object
- '''
- if self._obj:
- self._obj.add_network_policy (lo, ref)
- if update_server:
- self._conn_drv.virtual_network_update (self._obj)
-
- if add_link:
- self.add_link('network_policy', cfixture.ConrtailLink('network_policy', 'virtual_network', 'network_policy', ['ref'], (lo, ref)))
- #end add_network_policy_link
-
- def get_network_policys (self):
- return self.get_links ('network_policy')
- #end get_network_policys
- def add_route_table (self, lo, update_server = True, add_link = True):
- '''
- add :class:`RouteTable` link to :class:`VirtualNetwork`
- Args:
- lo (:class:`RouteTable`): obj to link
- '''
- if self._obj:
- self._obj.add_route_table (lo)
- if update_server:
- self._conn_drv.virtual_network_update (self._obj)
-
- if add_link:
- self.add_link('route_table', cfixture.ConrtailLink('route_table', 'virtual_network', 'route_table', ['ref'], lo))
- #end add_route_table_link
-
- def get_route_tables (self):
- return self.get_links ('route_table')
- #end get_route_tables
-
- def populate (self):
- self._obj.set_virtual_network_properties(self.virtual_network_properties or vnc_api.gen.resource_xsd.VirtualNetworkType.populate())
- self._obj.set_virtual_network_network_id(self.virtual_network_network_id or GeneratedsSuper.populate_integer("virtual_network_network_id"))
- self._obj.set_route_target_list(self.route_target_list or vnc_api.gen.resource_xsd.RouteTargetList.populate())
- self._obj.set_router_external(self.router_external or GeneratedsSuper.populate_boolean("router_external"))
- self._obj.set_is_shared(self.is_shared or GeneratedsSuper.populate_boolean("is_shared"))
- self._obj.set_external_ipam(self.external_ipam or GeneratedsSuper.populate_boolean("external_ipam"))
- self._obj.set_flood_unknown_unicast(self.flood_unknown_unicast or GeneratedsSuper.populate_boolean("flood_unknown_unicast"))
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(VirtualNetworkTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(ProjectTestFixtureGen(self._conn_drv, 'default-project'))
-
- self._obj = vnc_api.VirtualNetwork(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.virtual_network_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.virtual_network_properties = self.virtual_network_properties
- self._obj.virtual_network_network_id = self.virtual_network_network_id
- self._obj.route_target_list = self.route_target_list
- self._obj.router_external = self.router_external
- self._obj.is_shared = self.is_shared
- self._obj.external_ipam = self.external_ipam
- self._obj.flood_unknown_unicast = self.flood_unknown_unicast
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.virtual_network_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.virtual_network_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_virtual_machine_interface_back_refs() or self._obj.get_instance_ip_back_refs() or self._obj.get_physical_router_back_refs() or self._obj.get_logical_router_back_refs():
- return
- if self._obj.get_floating_ip_pools():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_virtual_networks():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.virtual_networks.remove(child_obj)
- break
-
- self._conn_drv.virtual_network_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class VirtualNetworkTestFixtureGen
-
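The generated fixtures above follow a read-or-create pattern: setUp() first tries to read the resource by fully qualified name and only creates it when the server raises NoIdError, then reads it back so server-allocated values (uuid, network id) are available through getObj(). A minimal usage sketch, assuming a testtools-based test case, a reachable API server, and that the deleted module is importable as vnc_api.gen.resource_test:

    import testtools
    from vnc_api import vnc_api
    from vnc_api.gen.resource_test import VirtualNetworkTestFixtureGen  # import path assumed

    class VirtualNetworkSmokeTest(testtools.TestCase):
        def test_read_or_create(self):
            conn = vnc_api.VncApi()  # connection parameters are deployment specific
            # useFixture() runs setUp() immediately and registers cleanUp() for teardown.
            vn_fix = self.useFixture(
                VirtualNetworkTestFixtureGen(conn, 'vn-smoke', auto_prop_val=True))
            vn = vn_fix.getObj()     # object as re-read from the server in setUp()
            self.assertIsNotNone(vn.uuid)
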
-class ProjectTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.Project`
- """
- def __init__(self, conn_drv, project_name=None, parent_fixt=None, auto_prop_val=False, namespace_ref_infos = None, floating_ip_pool_refs = None, quota=None, id_perms=None, display_name=None):
- '''
- Create ProjectTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (e.g. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc.)
-
- Kwargs:
- project_name (str): Name of project
- parent_fixt (:class:`.DomainTestFixtureGen`): Parent fixture
- namespace_ref_infos (list): list of (:class:`Namespace`, :class:`SubnetType`) tuples
- floating_ip_pool_refs (list): list of :class:`FloatingIpPool` objects
- quota (instance): instance of :class:`QuotaType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(ProjectTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not project_name:
- self._name = 'default-project'
- else:
- self._name = project_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- if namespace_ref_infos:
- for ln, ref in namespace_ref_infos:
- self.add_namespace (ln, ref)
- if floating_ip_pool_refs:
- for ln in floating_ip_pool_refs:
- self.add_floating_ip_pool (ln)
- self.quota = quota
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_namespaces ():
- self.add_namespace (*ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_floating_ip_pools ():
- self.add_floating_ip_pool (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_namespace (self, lo, ref, update_server = True, add_link = True):
- '''
- add :class:`Namespace` link to :class:`Project`
- Args:
- lo (:class:`Namespace`): obj to link
- ref (:class:`SubnetType`): property of the link object
- '''
- if self._obj:
- self._obj.add_namespace (lo, ref)
- if update_server:
- self._conn_drv.project_update (self._obj)
-
- if add_link:
- self.add_link('namespace', cfixture.ConrtailLink('namespace', 'project', 'namespace', ['ref'], (lo, ref)))
- #end add_namespace_link
-
- def get_namespaces (self):
- return self.get_links ('namespace')
- #end get_namespaces
- def add_floating_ip_pool (self, lo, update_server = True, add_link = True):
- '''
- add :class:`FloatingIpPool` link to :class:`Project`
- Args:
- lo (:class:`FloatingIpPool`): obj to link
- '''
- if self._obj:
- self._obj.add_floating_ip_pool (lo)
- if update_server:
- self._conn_drv.project_update (self._obj)
-
- if add_link:
- self.add_link('floating_ip_pool', cfixture.ConrtailLink('floating_ip_pool', 'project', 'floating_ip_pool', ['ref'], lo))
- #end add_floating_ip_pool_link
-
- def get_floating_ip_pools (self):
- return self.get_links ('floating_ip_pool')
- #end get_floating_ip_pools
-
- def populate (self):
- self._obj.set_quota(self.quota or vnc_api.gen.resource_xsd.QuotaType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(ProjectTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(DomainTestFixtureGen(self._conn_drv, 'default-domain'))
-
- self._obj = vnc_api.Project(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.project_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.quota = self.quota
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.project_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.project_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_floating_ip_back_refs():
- return
- if self._obj.get_security_groups() or self._obj.get_virtual_networks() or self._obj.get_qos_queues() or self._obj.get_qos_forwarding_classs() or self._obj.get_network_ipams() or self._obj.get_network_policys() or self._obj.get_virtual_machine_interfaces() or self._obj.get_service_instances() or self._obj.get_route_tables() or self._obj.get_interface_route_tables() or self._obj.get_logical_routers() or self._obj.get_loadbalancer_pools() or self._obj.get_loadbalancer_healthmonitors() or self._obj.get_virtual_ips():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_projects():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.projects.remove(child_obj)
- break
-
- self._conn_drv.project_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class ProjectTestFixtureGen
-
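Because setUp() falls back to the 'default-domain' and 'default-project' parents when parent_fixt is omitted, fixtures can also be chained explicitly to build a full config hierarchy. A sketch, assuming DomainTestFixtureGen is exported by the same deleted module and that test is any testtools.TestCase (or fixtures.Fixture) providing useFixture():

    from vnc_api.gen.resource_test import (DomainTestFixtureGen,
                                           ProjectTestFixtureGen,
                                           VirtualNetworkTestFixtureGen)

    def make_vn_under_new_project(test, conn):
        # getObj() of each parent fixture becomes the parent object passed to the
        # child resource constructor inside the child fixture's setUp().
        domain_fix = test.useFixture(DomainTestFixtureGen(conn, 'acme-domain'))
        proj_fix = test.useFixture(
            ProjectTestFixtureGen(conn, 'acme-project', parent_fixt=domain_fix))
        return test.useFixture(
            VirtualNetworkTestFixtureGen(conn, 'acme-vn', parent_fixt=proj_fix))
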
-class QosForwardingClassTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.QosForwardingClass`
- """
- def __init__(self, conn_drv, qos_forwarding_class_name=None, parent_fixt=None, auto_prop_val=False, qos_queue_refs = None, dscp=None, trusted=None, id_perms=None, display_name=None):
- '''
- Create QosForwardingClassTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (e.g. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc.)
-
- Kwargs:
- qos_forwarding_class_name (str): Name of qos_forwarding_class
- parent_fixt (:class:`.ProjectTestFixtureGen`): Parent fixture
- qos_queue_refs (list): list of :class:`QosQueue` objects
- dscp (instance): instance of :class:`xsd:integer`
- trusted (instance): instance of :class:`xsd:boolean`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(QosForwardingClassTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not qos_forwarding_class_name:
- self._name = 'default-qos-forwarding-class'
- else:
- self._name = qos_forwarding_class_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- if qos_queue_refs:
- for ln in qos_queue_refs:
- self.add_qos_queue (ln)
- self.dscp = dscp
- self.trusted = trusted
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_qos_queues ():
- self.add_qos_queue (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_qos_queue (self, lo, update_server = True, add_link = True):
- '''
- add :class:`QosQueue` link to :class:`QosForwardingClass`
- Args:
- lo (:class:`QosQueue`): obj to link
- '''
- if self._obj:
- self._obj.add_qos_queue (lo)
- if update_server:
- self._conn_drv.qos_forwarding_class_update (self._obj)
-
- if add_link:
- self.add_link('qos_queue', cfixture.ConrtailLink('qos_queue', 'qos_forwarding_class', 'qos_queue', ['ref'], lo))
- #end add_qos_queue_link
-
- def get_qos_queues (self):
- return self.get_links ('qos_queue')
- #end get_qos_queues
-
- def populate (self):
- self._obj.set_dscp(self.dscp or GeneratedsSuper.populate_integer("dscp"))
- self._obj.set_trusted(self.trusted or GeneratedsSuper.populate_boolean("trusted"))
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(QosForwardingClassTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(ProjectTestFixtureGen(self._conn_drv, 'default-project'))
-
- self._obj = vnc_api.QosForwardingClass(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.qos_forwarding_class_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.dscp = self.dscp
- self._obj.trusted = self.trusted
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.qos_forwarding_class_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.qos_forwarding_class_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_virtual_network_back_refs() or self._obj.get_virtual_machine_interface_back_refs():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_qos_forwarding_classs():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.qos_forwarding_classs.remove(child_obj)
- break
-
- self._conn_drv.qos_forwarding_class_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class QosForwardingClassTestFixtureGen
-
-class DatabaseNodeTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.DatabaseNode`
- """
- def __init__(self, conn_drv, database_node_name=None, parent_fixt=None, auto_prop_val=False, database_node_ip_address=None, id_perms=None, display_name=None):
- '''
- Create DatabaseNodeTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (e.g. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc.)
-
- Kwargs:
- database_node_name (str): Name of database_node
- parent_fixt (:class:`.GlobalSystemConfigTestFixtureGen`): Parent fixture
- database_node_ip_address (instance): instance of :class:`xsd:string`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(DatabaseNodeTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not database_node_name:
- self._name = 'default-database-node'
- else:
- self._name = database_node_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- self.database_node_ip_address = database_node_ip_address
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- return None
- #end _update_links
-
-
- def populate (self):
- self._obj.set_database_node_ip_address(self.database_node_ip_address or GeneratedsSuper.populate_string("database_node_ip_address"))
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(DatabaseNodeTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(GlobalSystemConfigTestFixtureGen(self._conn_drv, 'default-global-system-config'))
-
- self._obj = vnc_api.DatabaseNode(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.database_node_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.database_node_ip_address = self.database_node_ip_address
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.database_node_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.database_node_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_database_nodes():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.database_nodes.remove(child_obj)
- break
-
- self._conn_drv.database_node_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class DatabaseNodeTestFixtureGen
-
-class RoutingInstanceTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.RoutingInstance`
- """
- def __init__(self, conn_drv, routing_instance_name=None, parent_fixt=None, auto_prop_val=False, routing_instance_ref_infos = None, route_target_ref_infos = None, service_chain_information=None, routing_instance_is_default=None, static_route_entries=None, default_ce_protocol=None, id_perms=None, display_name=None):
- '''
- Create RoutingInstanceTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (e.g. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc.)
-
- Kwargs:
- routing_instance_name (str): Name of routing_instance
- parent_fixt (:class:`.VirtualNetworkTestFixtureGen`): Parent fixture
- routing_instance_ref_infos (list): list of (:class:`RoutingInstance`, :class:`ConnectionType`) tuples
- route_target_ref_infos (list): list of (:class:`RouteTarget`, :class:`InstanceTargetType`) tuples
- service_chain_information (instance): instance of :class:`ServiceChainInfo`
- routing_instance_is_default (instance): instance of :class:`xsd:boolean`
- static_route_entries (instance): instance of :class:`StaticRouteEntriesType`
- default_ce_protocol (instance): instance of :class:`DefaultProtocolType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(RoutingInstanceTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not routing_instance_name:
- self._name = 'default-routing-instance'
- else:
- self._name = routing_instance_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- if routing_instance_ref_infos:
- for ln, ref in routing_instance_ref_infos:
- self.add_routing_instance (ln, ref)
- if route_target_ref_infos:
- for ln, ref in route_target_ref_infos:
- self.add_route_target (ln, ref)
- self.service_chain_information = service_chain_information
- self.routing_instance_is_default = routing_instance_is_default
- self.static_route_entries = static_route_entries
- self.default_ce_protocol = default_ce_protocol
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_routing_instances ():
- self.add_routing_instance (*ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_route_targets ():
- self.add_route_target (*ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_routing_instance (self, lo, ref, update_server = True, add_link = True):
- '''
- add :class:`RoutingInstance` link to :class:`RoutingInstance`
- Args:
- lo (:class:`RoutingInstance`): obj to link
- ref (:class:`ConnectionType`): property of the link object
- '''
- if self._obj:
- self._obj.add_routing_instance (lo, ref)
- if update_server:
- self._conn_drv.routing_instance_update (self._obj)
-
- if add_link:
- self.add_link('routing_instance', cfixture.ConrtailLink('routing_instance', 'routing_instance', 'routing_instance', ['ref'], (lo, ref)))
- #end add_routing_instance_link
-
- def get_routing_instances (self):
- return self.get_links ('routing_instance')
- #end get_routing_instances
- def add_route_target (self, lo, ref, update_server = True, add_link = True):
- '''
- add :class:`RouteTarget` link to :class:`RoutingInstance`
- Args:
- lo (:class:`RouteTarget`): obj to link
- ref (:class:`InstanceTargetType`): property of the link object
- '''
- if self._obj:
- self._obj.add_route_target (lo, ref)
- if update_server:
- self._conn_drv.routing_instance_update (self._obj)
-
- if add_link:
- self.add_link('route_target', cfixture.ConrtailLink('route_target', 'routing_instance', 'route_target', ['ref'], (lo, ref)))
- #end add_route_target_link
-
- def get_route_targets (self):
- return self.get_links ('route_target')
- #end get_route_targets
-
- def populate (self):
- self._obj.set_service_chain_information(self.service_chain_information or vnc_api.gen.resource_xsd.ServiceChainInfo.populate())
- self._obj.set_routing_instance_is_default(self.routing_instance_is_default or GeneratedsSuper.populate_boolean("routing_instance_is_default"))
- self._obj.set_static_route_entries(self.static_route_entries or vnc_api.gen.resource_xsd.StaticRouteEntriesType.populate())
- self._obj.set_default_ce_protocol(self.default_ce_protocol or vnc_api.gen.resource_xsd.DefaultProtocolType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(RoutingInstanceTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(VirtualNetworkTestFixtureGen(self._conn_drv, 'default-virtual-network'))
-
- self._obj = vnc_api.RoutingInstance(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.routing_instance_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.service_chain_information = self.service_chain_information
- self._obj.routing_instance_is_default = self.routing_instance_is_default
- self._obj.static_route_entries = self.static_route_entries
- self._obj.default_ce_protocol = self.default_ce_protocol
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.routing_instance_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.routing_instance_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_virtual_machine_interface_back_refs() or self._obj.get_routing_instance_back_refs():
- return
- if self._obj.get_bgp_routers():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_routing_instances():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.routing_instances.remove(child_obj)
- break
-
- self._conn_drv.routing_instance_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class RoutingInstanceTestFixtureGen
-
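Links that carry an attribute, such as the routing-instance to route-target reference above, take both the target object and an attribute instance; the XSD types generated in resource_xsd.py expose a populate() classmethod that fills such an attribute with placeholder values. A sketch, with the route-target name format and the VncApi create call given only as illustrative assumptions:

    import vnc_api.gen.resource_xsd as resource_xsd
    from vnc_api import vnc_api
    from vnc_api.gen.resource_test import RoutingInstanceTestFixtureGen

    def link_route_target(test, conn):
        ri_fix = test.useFixture(RoutingInstanceTestFixtureGen(conn, 'ri-demo'))
        rt = vnc_api.RouteTarget('target:64512:1')   # illustrative name only
        conn.route_target_create(rt)                 # assumed VncApi CRUD helper
        # After setUp() the fixture holds a live object, so add_route_target()
        # also pushes a routing_instance_update() to the server.
        ri_fix.add_route_target(rt, resource_xsd.InstanceTargetType.populate())
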
-class NetworkIpamTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.NetworkIpam`
- """
- def __init__(self, conn_drv, network_ipam_name=None, parent_fixt=None, auto_prop_val=False, virtual_DNS_refs = None, network_ipam_mgmt=None, id_perms=None, display_name=None):
- '''
- Create NetworkIpamTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (e.g. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc.)
-
- Kwargs:
- network_ipam_name (str): Name of network_ipam
- parent_fixt (:class:`.ProjectTestFixtureGen`): Parent fixture
- virtual_DNS_refs (list): list of :class:`VirtualDns` objects
- network_ipam_mgmt (instance): instance of :class:`IpamType`
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(NetworkIpamTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not network_ipam_name:
- self._name = 'default-network-ipam'
- else:
- self._name = network_ipam_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- if virtual_DNS_refs:
- for ln in virtual_DNS_refs:
- self.add_virtual_DNS (ln)
- self.network_ipam_mgmt = network_ipam_mgmt
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_virtual_DNSs ():
- self.add_virtual_DNS (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_virtual_DNS (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualDns` link to :class:`NetworkIpam`
- Args:
- lo (:class:`VirtualDns`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_DNS (lo)
- if update_server:
- self._conn_drv.network_ipam_update (self._obj)
-
- if add_link:
- self.add_link('virtual_DNS', cfixture.ConrtailLink('virtual_DNS', 'network_ipam', 'virtual_DNS', ['ref'], lo))
- #end add_virtual_DNS_link
-
- def get_virtual_DNSs (self):
- return self.get_links ('virtual_DNS')
- #end get_virtual_DNSs
-
- def populate (self):
- self._obj.set_network_ipam_mgmt(self.network_ipam_mgmt or vnc_api.gen.resource_xsd.IpamType.populate())
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(NetworkIpamTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(ProjectTestFixtureGen(self._conn_drv, 'default-project'))
-
- self._obj = vnc_api.NetworkIpam(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.network_ipam_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.network_ipam_mgmt = self.network_ipam_mgmt
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.network_ipam_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.network_ipam_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- if self._obj.get_virtual_network_back_refs():
- return
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_network_ipams():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.network_ipams.remove(child_obj)
- break
-
- self._conn_drv.network_ipam_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class NetworkIpamTestFixtureGen
-
-class LogicalRouterTestFixtureGen(cfixture.ContrailFixture):
- """
- Fixture for :class:`.LogicalRouter`
- """
- def __init__(self, conn_drv, logical_router_name=None, parent_fixt=None, auto_prop_val=False, virtual_machine_interface_refs = None, route_target_refs = None, virtual_network_refs = None, service_instance_refs = None, id_perms=None, display_name=None):
- '''
- Create LogicalRouterTestFixtureGen object
-
- constructor
-
- Args:
- conn_drv (:class:`ConnectionDriver`): connection driver (e.g. :class:`vnc_api.vnc_api.VncApi`, :class:`novaclient.client.Client`, etc.)
-
- Kwargs:
- logical_router_name (str): Name of logical_router
- parent_fixt (:class:`.ProjectTestFixtureGen`): Parent fixture
- virtual_machine_interface_refs (list): list of :class:`VirtualMachineInterface` objects
- route_target_refs (list): list of :class:`RouteTarget` objects
- virtual_network_refs (list): list of :class:`VirtualNetwork` objects
- service_instance_refs (list): list of :class:`ServiceInstance` objects
- id_perms (instance): instance of :class:`IdPermsType`
- display_name (instance): instance of :class:`xsd:string`
-
- '''
- super(LogicalRouterTestFixtureGen, self).__init__()
- self._conn_drv = conn_drv
- if not logical_router_name:
- self._name = 'default-logical-router'
- else:
- self._name = logical_router_name
- self._obj = None
- self._parent_fixt = parent_fixt
- self._auto_prop_val = auto_prop_val
- if virtual_machine_interface_refs:
- for ln in virtual_machine_interface_refs:
- self.add_virtual_machine_interface (ln)
- if route_target_refs:
- for ln in route_target_refs:
- self.add_route_target (ln)
- if virtual_network_refs:
- for ln in virtual_network_refs:
- self.add_virtual_network (ln)
- if service_instance_refs:
- for ln in service_instance_refs:
- self.add_service_instance (ln)
- self.id_perms = id_perms
- self.display_name = display_name
- #end __init__
-
- def _update_links (self, update_server):
- for ln in self.get_virtual_machine_interfaces ():
- self.add_virtual_machine_interface (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_route_targets ():
- self.add_route_target (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_virtual_networks ():
- self.add_virtual_network (ln.fixture (), update_server = update_server, add_link = False)
- for ln in self.get_service_instances ():
- self.add_service_instance (ln.fixture (), update_server = update_server, add_link = False)
- return None
- #end _update_links
-
- def add_virtual_machine_interface (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualMachineInterface` link to :class:`LogicalRouter`
- Args:
- lo (:class:`VirtualMachineInterface`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_machine_interface (lo)
- if update_server:
- self._conn_drv.logical_router_update (self._obj)
-
- if add_link:
- self.add_link('virtual_machine_interface', cfixture.ConrtailLink('virtual_machine_interface', 'logical_router', 'virtual_machine_interface', ['ref'], lo))
- #end add_virtual_machine_interface_link
-
- def get_virtual_machine_interfaces (self):
- return self.get_links ('virtual_machine_interface')
- #end get_virtual_machine_interfaces
- def add_route_target (self, lo, update_server = True, add_link = True):
- '''
- add :class:`RouteTarget` link to :class:`LogicalRouter`
- Args:
- lo (:class:`RouteTarget`): obj to link
- '''
- if self._obj:
- self._obj.add_route_target (lo)
- if update_server:
- self._conn_drv.logical_router_update (self._obj)
-
- if add_link:
- self.add_link('route_target', cfixture.ConrtailLink('route_target', 'logical_router', 'route_target', ['ref'], lo))
- #end add_route_target_link
-
- def get_route_targets (self):
- return self.get_links ('route_target')
- #end get_route_targets
- def add_virtual_network (self, lo, update_server = True, add_link = True):
- '''
- add :class:`VirtualNetwork` link to :class:`LogicalRouter`
- Args:
- lo (:class:`VirtualNetwork`): obj to link
- '''
- if self._obj:
- self._obj.add_virtual_network (lo)
- if update_server:
- self._conn_drv.logical_router_update (self._obj)
-
- if add_link:
- self.add_link('virtual_network', cfixture.ConrtailLink('virtual_network', 'logical_router', 'virtual_network', ['ref'], lo))
- #end add_virtual_network_link
-
- def get_virtual_networks (self):
- return self.get_links ('virtual_network')
- #end get_virtual_networks
- def add_service_instance (self, lo, update_server = True, add_link = True):
- '''
- add :class:`ServiceInstance` link to :class:`LogicalRouter`
- Args:
- lo (:class:`ServiceInstance`): obj to link
- '''
- if self._obj:
- self._obj.add_service_instance (lo)
- if update_server:
- self._conn_drv.logical_router_update (self._obj)
-
- if add_link:
- self.add_link('service_instance', cfixture.ConrtailLink('service_instance', 'logical_router', 'service_instance', ['ref'], lo))
- #end add_service_instance_link
-
- def get_service_instances (self):
- return self.get_links ('service_instance')
- #end get_service_instances
-
- def populate (self):
- self._obj.set_id_perms(self.id_perms or vnc_api.gen.resource_xsd.IdPermsType.populate())
- self._obj.set_display_name(self.display_name or GeneratedsSuper.populate_string("display_name"))
- #end populate
-
- def setUp(self):
- super(LogicalRouterTestFixtureGen, self).setUp()
- if not self._parent_fixt:
- self._parent_fixt = self.useFixture(ProjectTestFixtureGen(self._conn_drv, 'default-project'))
-
- self._obj = vnc_api.LogicalRouter(self._name, self._parent_fixt.getObj ())
- try:
- self._obj = self._conn_drv.logical_router_read (fq_name=self._obj.get_fq_name())
- self._update_links (update_server=True)
- except NoIdError:
- self._update_links (update_server=False)
- if self._auto_prop_val:
- self.populate ()
- else:
- self._obj.id_perms = self.id_perms
- self._obj.display_name = self.display_name
- self._conn_drv.logical_router_create(self._obj)
- # read back for server allocated values
- self._obj = self._conn_drv.logical_router_read(id = self._obj.uuid)
- #end setUp
-
- def cleanUp(self):
- parent_fixt = getattr(self, '_parent_fixt', None)
- if parent_fixt:
- # non config-root child
- parent_obj = self._parent_fixt.getObj()
- # remove child from parent obj
- for child_obj in parent_obj.get_logical_routers():
- if type(child_obj) == dict:
- child_uuid = child_obj['uuid']
- else:
- child_uuid = child_obj.uuid
- if child_uuid == self._obj.uuid:
- parent_obj.logical_routers.remove(child_obj)
- break
-
- self._conn_drv.logical_router_delete(id = self._obj.uuid)
- #end cleanUp
-
- def getObj(self):
- return self._obj
- #end getObj
-
-#end class LogicalRouterTestFixtureGen
-
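Reference lists can also be handed to the fixture constructor; each entry is simply replayed through the corresponding add_* helper before setUp() runs. A short sketch, where vn_obj and vmi_obj stand for pre-created VirtualNetwork and VirtualMachineInterface objects:

    from vnc_api.gen.resource_test import LogicalRouterTestFixtureGen

    def make_router(test, conn, vn_obj, vmi_obj):
        # The constructor calls add_virtual_network() / add_virtual_machine_interface()
        # for every entry, recording the links before the object exists on the server.
        return test.useFixture(LogicalRouterTestFixtureGen(
            conn, 'lr-demo',
            virtual_network_refs=[vn_obj],
            virtual_machine_interface_refs=[vmi_obj]))
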
diff --git a/Testcases/vnc_api/gen/resource_test.pyc b/Testcases/vnc_api/gen/resource_test.pyc
deleted file mode 100644
index 461e421..0000000
--- a/Testcases/vnc_api/gen/resource_test.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/gen/resource_xsd.py b/Testcases/vnc_api/gen/resource_xsd.py
deleted file mode 100644
index e8453d4..0000000
--- a/Testcases/vnc_api/gen/resource_xsd.py
+++ /dev/null
@@ -1,18494 +0,0 @@
-"""
-This module defines the classes for types defined in :doc:`vnc_cfg.xsd`
-"""
-import json
-from generatedssuper import *
-class MacAddressesType(GeneratedsSuper):
- """
- MacAddressesType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, mac_address=None, **kwargs):
- if (mac_address is None) or (mac_address == []):
- self.mac_address = []
- else:
- self.mac_address = mac_address
- def factory(*args_, **kwargs_):
- if MacAddressesType.subclass:
- return MacAddressesType.subclass(*args_, **kwargs_)
- else:
- return MacAddressesType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_mac_address(self): return self.mac_address
- def set_mac_address(self, mac_address): self.mac_address = mac_address
- def add_mac_address(self, value): self.mac_address.append(value)
- def insert_mac_address(self, index, value): self.mac_address[index] = value
- def delete_mac_address(self, value): self.mac_address.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.mac_address == other.mac_address)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_mac_address ([obj.populate_string ("mac_address")])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='MacAddressesType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='MacAddressesType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MacAddressesType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='MacAddressesType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for mac_address_ in self.mac_address:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%smac-address>%s</%smac-address>%s' % (namespace_, self.gds_format_string(quote_xml(mac_address_).encode(ExternalEncoding), input_name='mac-address'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.mac_address
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='MacAddressesType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('mac_address=[\n')
- level += 1
- for mac_address_ in self.mac_address:
- showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(mac_address_).encode(ExternalEncoding))
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='MacAddressesType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'mac-address':
- mac_address_ = child_.text
- mac_address_ = self.gds_validate_string(mac_address_, node, 'mac_address')
- self.mac_address.append(mac_address_)
-# end class MacAddressesType
-
-
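MacAddressesType above, like the other generated XSD types in this module, is a plain value holder with XML and dict serialisers. A small Python 2 sketch, assuming the module is importable as vnc_api.gen.resource_xsd:

    import sys
    from vnc_api.gen.resource_xsd import MacAddressesType

    macs = MacAddressesType(mac_address=['02:ca:fe:00:00:01'])
    macs.add_mac_address('02:ca:fe:00:00:02')
    # export() writes <mac-address> child elements to any file-like object.
    macs.export(sys.stdout, level=0, name_='mac-addresses')
    # exportDict() round-trips through json and yields
    # {'mac-addresses': {'mac_address': ['02:ca:fe:00:00:01', '02:ca:fe:00:00:02']}}
    as_dict = macs.exportDict(name_='mac-addresses')
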
-class IpAddressesType(GeneratedsSuper):
- """
- IpAddressesType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, ip_address=None, **kwargs):
- if (ip_address is None) or (ip_address == []):
- self.ip_address = []
- else:
- self.ip_address = ip_address
- def factory(*args_, **kwargs_):
- if IpAddressesType.subclass:
- return IpAddressesType.subclass(*args_, **kwargs_)
- else:
- return IpAddressesType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_ip_address(self): return self.ip_address
- def set_ip_address(self, ip_address): self.ip_address = ip_address
- def add_ip_address(self, value): self.ip_address.append(value)
- def insert_ip_address(self, index, value): self.ip_address[index] = value
- def delete_ip_address(self, value): self.ip_address.remove(value)
- def validate_IpAddressType(self, value):
- # Validate type IpAddressType, a restriction on xsd:string.
- pass
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.ip_address == other.ip_address)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_ip_address ([obj.populate_string ("ip_address")])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='IpAddressesType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='IpAddressesType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IpAddressesType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='IpAddressesType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for ip_address_ in self.ip_address:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sip-address>%s</%sip-address>%s' % (namespace_, self.gds_format_string(quote_xml(ip_address_).encode(ExternalEncoding), input_name='ip-address'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.ip_address
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='IpAddressesType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('ip_address=[\n')
- level += 1
- for ip_address_ in self.ip_address:
- showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(ip_address_).encode(ExternalEncoding))
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='IpAddressesType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'ip-address':
- ip_address_ = child_.text
- ip_address_ = self.gds_validate_string(ip_address_, node, 'ip_address')
- self.ip_address.append(ip_address_)
- self.validate_IpAddressType(self.ip_address) # validate type IpAddressType
-# end class IpAddressesType
-
-
-class AllocationPoolType(GeneratedsSuper):
- """
- AllocationPoolType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, start=None, end=None, **kwargs):
- self.start = start
- self.end = end
- def factory(*args_, **kwargs_):
- if AllocationPoolType.subclass:
- return AllocationPoolType.subclass(*args_, **kwargs_)
- else:
- return AllocationPoolType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_start(self): return self.start
- def set_start(self, start): self.start = start
- def get_end(self): return self.end
- def set_end(self, end): self.end = end
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.start == other.start and
- self.end == other.end)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_start (obj.populate_string ("start"))
- obj.set_end (obj.populate_string ("end"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='AllocationPoolType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='AllocationPoolType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AllocationPoolType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='AllocationPoolType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.start is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sstart>%s</%sstart>%s' % (namespace_, self.gds_format_string(quote_xml(self.start).encode(ExternalEncoding), input_name='start'), namespace_, eol_))
- if self.end is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%send>%s</%send>%s' % (namespace_, self.gds_format_string(quote_xml(self.end).encode(ExternalEncoding), input_name='end'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.start is not None or
- self.end is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='AllocationPoolType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.start is not None:
- showIndent(outfile, level)
- outfile.write('start=%s,\n' % quote_python(self.start).encode(ExternalEncoding))
- if self.end is not None:
- showIndent(outfile, level)
- outfile.write('end=%s,\n' % quote_python(self.end).encode(ExternalEncoding))
- def exportDict(self, name_='AllocationPoolType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'start':
- start_ = child_.text
- start_ = self.gds_validate_string(start_, node, 'start')
- self.start = start_
- elif nodeName_ == 'end':
- end_ = child_.text
- end_ = self.gds_validate_string(end_, node, 'end')
- self.end = end_
-# end class AllocationPoolType
-
-
-class SubnetType(GeneratedsSuper):
- """
- SubnetType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, ip_prefix=None, ip_prefix_len=None, **kwargs):
- self.ip_prefix = ip_prefix
- self.ip_prefix_len = ip_prefix_len
- def factory(*args_, **kwargs_):
- if SubnetType.subclass:
- return SubnetType.subclass(*args_, **kwargs_)
- else:
- return SubnetType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_ip_prefix(self): return self.ip_prefix
- def set_ip_prefix(self, ip_prefix): self.ip_prefix = ip_prefix
- def get_ip_prefix_len(self): return self.ip_prefix_len
- def set_ip_prefix_len(self, ip_prefix_len): self.ip_prefix_len = ip_prefix_len
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.ip_prefix == other.ip_prefix and
- self.ip_prefix_len == other.ip_prefix_len)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_ip_prefix (obj.populate_string ("ip_prefix"))
- obj.set_ip_prefix_len (obj.populate_integer ("ip_prefix_len"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='SubnetType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='SubnetType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SubnetType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='SubnetType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.ip_prefix is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sip-prefix>%s</%sip-prefix>%s' % (namespace_, self.gds_format_string(quote_xml(self.ip_prefix).encode(ExternalEncoding), input_name='ip-prefix'), namespace_, eol_))
- if self.ip_prefix_len is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sip-prefix-len>%s</%sip-prefix-len>%s' % (namespace_, self.gds_format_integer(self.ip_prefix_len, input_name='ip-prefix-len'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.ip_prefix is not None or
- self.ip_prefix_len is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='SubnetType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.ip_prefix is not None:
- showIndent(outfile, level)
- outfile.write('ip_prefix=%s,\n' % quote_python(self.ip_prefix).encode(ExternalEncoding))
- if self.ip_prefix_len is not None:
- showIndent(outfile, level)
- outfile.write('ip_prefix_len=%d,\n' % self.ip_prefix_len)
- def exportDict(self, name_='SubnetType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'ip-prefix':
- ip_prefix_ = child_.text
- ip_prefix_ = self.gds_validate_string(ip_prefix_, node, 'ip_prefix')
- self.ip_prefix = ip_prefix_
- elif nodeName_ == 'ip-prefix-len':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'ip_prefix_len')
- self.ip_prefix_len = ival_
-# end class SubnetType
-
-
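SubnetType also covers the parsing direction: build() walks an ElementTree node, coercing <ip-prefix-len> to an integer and validating it. A sketch using the standard-library parser:

    from xml.etree import ElementTree
    from vnc_api.gen.resource_xsd import SubnetType

    node = ElementTree.fromstring(
        '<subnet>'
        '<ip-prefix>10.0.0.0</ip-prefix>'
        '<ip-prefix-len>24</ip-prefix-len>'
        '</subnet>')
    subnet = SubnetType.factory()
    subnet.build(node)                  # fills ip_prefix and ip_prefix_len
    assert subnet.get_ip_prefix() == '10.0.0.0'
    assert subnet.get_ip_prefix_len() == 24
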
-class AllowedAddressPair(GeneratedsSuper):
- """
- AllowedAddressPair class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, ip=None, mac=None, address_mode=None, **kwargs):
- if isinstance(ip, dict):
- obj = SubnetType(**ip)
- self.ip = obj
- else:
- self.ip = ip
- self.mac = mac
- self.address_mode = address_mode
- def factory(*args_, **kwargs_):
- if AllowedAddressPair.subclass:
- return AllowedAddressPair.subclass(*args_, **kwargs_)
- else:
- return AllowedAddressPair(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_ip(self): return self.ip
- def set_ip(self, ip): self.ip = ip
- def get_mac(self): return self.mac
- def set_mac(self, mac): self.mac = mac
- def get_address_mode(self): return self.address_mode
- def set_address_mode(self, address_mode): self.address_mode = address_mode
- def validate_AddressMode(self, value):
- # Validate type AddressMode, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'active-active', u'active-standby'])
- else:
- error = value not in [u'active-active', u'active-standby']
- if error:
- raise ValueError("AddressMode must be one of [u'active-active', u'active-standby']")
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.ip == other.ip and
- self.mac == other.mac and
- self.address_mode == other.address_mode)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_ip (SubnetType.populate ())
- obj.set_mac (obj.populate_string ("mac"))
- obj.set_address_mode (obj.populate_string ("address_mode"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='AllowedAddressPair', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='AllowedAddressPair')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AllowedAddressPair'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='AllowedAddressPair', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.ip is not None:
- self.ip.export(outfile, level, namespace_, name_='ip', pretty_print=pretty_print)
- if self.mac is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%smac>%s</%smac>%s' % (namespace_, self.gds_format_string(quote_xml(self.mac).encode(ExternalEncoding), input_name='mac'), namespace_, eol_))
- if self.address_mode is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%saddress-mode>%s</%saddress-mode>%s' % (namespace_, self.gds_format_string(quote_xml(self.address_mode).encode(ExternalEncoding), input_name='address-mode'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.ip is not None or
- self.mac is not None or
- self.address_mode is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='AllowedAddressPair'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.ip is not None:
- showIndent(outfile, level)
- outfile.write('ip=model_.SubnetType(\n')
- self.ip.exportLiteral(outfile, level, name_='ip')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.mac is not None:
- showIndent(outfile, level)
- outfile.write('mac=%s,\n' % quote_python(self.mac).encode(ExternalEncoding))
- if self.address_mode is not None:
- showIndent(outfile, level)
- outfile.write('address_mode=%s,\n' % quote_python(self.address_mode).encode(ExternalEncoding))
- def exportDict(self, name_='AllowedAddressPair'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'ip':
- obj_ = SubnetType.factory()
- obj_.build(child_)
- self.set_ip(obj_)
- elif nodeName_ == 'mac':
- mac_ = child_.text
- mac_ = self.gds_validate_string(mac_, node, 'mac')
- self.mac = mac_
- elif nodeName_ == 'address-mode':
- address_mode_ = child_.text
- address_mode_ = self.gds_validate_string(address_mode_, node, 'address_mode')
- self.address_mode = address_mode_
- self.validate_AddressMode(self.address_mode) # validate type AddressMode
-# end class AllowedAddressPair
-
-
-class AllowedAddressPairs(GeneratedsSuper):
- """
- AllowedAddressPairs class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, allowed_address_pair=None, **kwargs):
- if (allowed_address_pair is None) or (allowed_address_pair == []):
- self.allowed_address_pair = []
- else:
- if isinstance(allowed_address_pair[0], dict):
- objs = [AllowedAddressPair(**elem) for elem in allowed_address_pair]
- self.allowed_address_pair = objs
- else:
- self.allowed_address_pair = allowed_address_pair
- def factory(*args_, **kwargs_):
- if AllowedAddressPairs.subclass:
- return AllowedAddressPairs.subclass(*args_, **kwargs_)
- else:
- return AllowedAddressPairs(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_allowed_address_pair(self): return self.allowed_address_pair
- def set_allowed_address_pair(self, allowed_address_pair): self.allowed_address_pair = allowed_address_pair
- def add_allowed_address_pair(self, value): self.allowed_address_pair.append(value)
- def insert_allowed_address_pair(self, index, value): self.allowed_address_pair[index] = value
- def delete_allowed_address_pair(self, value): self.allowed_address_pair.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.allowed_address_pair == other.allowed_address_pair)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_allowed_address_pair ([AllowedAddressPair.populate ()])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='AllowedAddressPairs', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='AllowedAddressPairs')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AllowedAddressPairs'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='AllowedAddressPairs', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for allowed_address_pair_ in self.allowed_address_pair:
- if isinstance(allowed_address_pair_, dict):
- allowed_address_pair_ = AllowedAddressPair(**allowed_address_pair_)
- allowed_address_pair_.export(outfile, level, namespace_, name_='allowed-address-pair', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.allowed_address_pair
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='AllowedAddressPairs'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('allowed_address_pair=[\n')
- level += 1
- for allowed_address_pair_ in self.allowed_address_pair:
- showIndent(outfile, level)
- outfile.write('model_.AllowedAddressPair(\n')
- allowed_address_pair_.exportLiteral(outfile, level, name_='AllowedAddressPair')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='AllowedAddressPairs'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'allowed-address-pair':
- obj_ = AllowedAddressPair.factory()
- obj_.build(child_)
- self.allowed_address_pair.append(obj_)
-# end class AllowedAddressPairs
-
-
-class UuidType(GeneratedsSuper):
- """
- UuidType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, uuid_mslong=None, uuid_lslong=None, **kwargs):
- self.uuid_mslong = uuid_mslong
- self.uuid_lslong = uuid_lslong
- def factory(*args_, **kwargs_):
- if UuidType.subclass:
- return UuidType.subclass(*args_, **kwargs_)
- else:
- return UuidType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_uuid_mslong(self): return self.uuid_mslong
- def set_uuid_mslong(self, uuid_mslong): self.uuid_mslong = uuid_mslong
- def get_uuid_lslong(self): return self.uuid_lslong
- def set_uuid_lslong(self, uuid_lslong): self.uuid_lslong = uuid_lslong
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.uuid_mslong == other.uuid_mslong and
- self.uuid_lslong == other.uuid_lslong)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_uuid_mslong (obj.populate_unsignedLong ("uuid_mslong"))
- obj.set_uuid_lslong (obj.populate_unsignedLong ("uuid_lslong"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='UuidType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='UuidType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='UuidType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='UuidType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.uuid_mslong is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%suuid-mslong>%s</%suuid-mslong>%s' % (namespace_, self.gds_format_integer(self.uuid_mslong, input_name='uuid-mslong'), namespace_, eol_))
- if self.uuid_lslong is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%suuid-lslong>%s</%suuid-lslong>%s' % (namespace_, self.gds_format_integer(self.uuid_lslong, input_name='uuid-lslong'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.uuid_mslong is not None or
- self.uuid_lslong is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='UuidType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.uuid_mslong is not None:
- showIndent(outfile, level)
- outfile.write('uuid_mslong=%d,\n' % self.uuid_mslong)
- if self.uuid_lslong is not None:
- showIndent(outfile, level)
- outfile.write('uuid_lslong=%d,\n' % self.uuid_lslong)
- def exportDict(self, name_='UuidType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'uuid-mslong':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'uuid_mslong')
- self.uuid_mslong = ival_
- elif nodeName_ == 'uuid-lslong':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'uuid_lslong')
- self.uuid_lslong = ival_
-# end class UuidType
-
-
-class SequenceType(GeneratedsSuper):
- """
- SequenceType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, major=None, minor=None, **kwargs):
- self.major = major
- self.minor = minor
- def factory(*args_, **kwargs_):
- if SequenceType.subclass:
- return SequenceType.subclass(*args_, **kwargs_)
- else:
- return SequenceType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_major(self): return self.major
- def set_major(self, major): self.major = major
- def get_minor(self): return self.minor
- def set_minor(self, minor): self.minor = minor
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.major == other.major and
- self.minor == other.minor)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_major (obj.populate_integer ("major"))
- obj.set_minor (obj.populate_integer ("minor"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='SequenceType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='SequenceType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SequenceType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='SequenceType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.major is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%smajor>%s</%smajor>%s' % (namespace_, self.gds_format_integer(self.major, input_name='major'), namespace_, eol_))
- if self.minor is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sminor>%s</%sminor>%s' % (namespace_, self.gds_format_integer(self.minor, input_name='minor'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.major is not None or
- self.minor is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='SequenceType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.major is not None:
- showIndent(outfile, level)
- outfile.write('major=%d,\n' % self.major)
- if self.minor is not None:
- showIndent(outfile, level)
- outfile.write('minor=%d,\n' % self.minor)
- def exportDict(self, name_='SequenceType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'major':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'major')
- self.major = ival_
- elif nodeName_ == 'minor':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'minor')
- self.minor = ival_
-# end class SequenceType
-
-
-class TimerType(GeneratedsSuper):
- """
- TimerType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, start_time=None, on_interval=None, off_interval=None, end_time=None, **kwargs):
- self.start_time = start_time
- self.on_interval = on_interval
- self.off_interval = off_interval
- self.end_time = end_time
- def factory(*args_, **kwargs_):
- if TimerType.subclass:
- return TimerType.subclass(*args_, **kwargs_)
- else:
- return TimerType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_start_time(self): return self.start_time
- def set_start_time(self, start_time): self.start_time = start_time
- def get_on_interval(self): return self.on_interval
- def set_on_interval(self, on_interval): self.on_interval = on_interval
- def get_off_interval(self): return self.off_interval
- def set_off_interval(self, off_interval): self.off_interval = off_interval
- def get_end_time(self): return self.end_time
- def set_end_time(self, end_time): self.end_time = end_time
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.start_time == other.start_time and
- self.on_interval == other.on_interval and
- self.off_interval == other.off_interval and
- self.end_time == other.end_time)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_start_time (obj.populate_dateTime ("start_time"))
- obj.set_on_interval (obj.populate_time ("on_interval"))
- obj.set_off_interval (obj.populate_time ("off_interval"))
- obj.set_end_time (obj.populate_dateTime ("end_time"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='TimerType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='TimerType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TimerType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='TimerType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.start_time is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sstart-time>%s</%sstart-time>%s' % (namespace_, self.gds_format_string(quote_xml(self.start_time).encode(ExternalEncoding), input_name='start-time'), namespace_, eol_))
- if self.on_interval is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%son-interval>%s</%son-interval>%s' % (namespace_, self.gds_format_string(quote_xml(self.on_interval).encode(ExternalEncoding), input_name='on-interval'), namespace_, eol_))
- if self.off_interval is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%soff-interval>%s</%soff-interval>%s' % (namespace_, self.gds_format_string(quote_xml(self.off_interval).encode(ExternalEncoding), input_name='off-interval'), namespace_, eol_))
- if self.end_time is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%send-time>%s</%send-time>%s' % (namespace_, self.gds_format_string(quote_xml(self.end_time).encode(ExternalEncoding), input_name='end-time'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.start_time is not None or
- self.on_interval is not None or
- self.off_interval is not None or
- self.end_time is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='TimerType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.start_time is not None:
- showIndent(outfile, level)
- outfile.write('start_time=%s,\n' % quote_python(self.start_time).encode(ExternalEncoding))
- if self.on_interval is not None:
- showIndent(outfile, level)
- outfile.write('on_interval=%s,\n' % quote_python(self.on_interval).encode(ExternalEncoding))
- if self.off_interval is not None:
- showIndent(outfile, level)
- outfile.write('off_interval=%s,\n' % quote_python(self.off_interval).encode(ExternalEncoding))
- if self.end_time is not None:
- showIndent(outfile, level)
- outfile.write('end_time=%s,\n' % quote_python(self.end_time).encode(ExternalEncoding))
- def exportDict(self, name_='TimerType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'start-time':
- start_time_ = child_.text
- start_time_ = self.gds_validate_string(start_time_, node, 'start_time')
- self.start_time = start_time_
- elif nodeName_ == 'on-interval':
- on_interval_ = child_.text
- on_interval_ = self.gds_validate_string(on_interval_, node, 'on_interval')
- self.on_interval = on_interval_
- elif nodeName_ == 'off-interval':
- off_interval_ = child_.text
- off_interval_ = self.gds_validate_string(off_interval_, node, 'off_interval')
- self.off_interval = off_interval_
- elif nodeName_ == 'end-time':
- end_time_ = child_.text
- end_time_ = self.gds_validate_string(end_time_, node, 'end_time')
- self.end_time = end_time_
-# end class TimerType
-
-
-class VirtualNetworkPolicyType(GeneratedsSuper):
- """
- VirtualNetworkPolicyType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, sequence=None, timer=None, **kwargs):
- if isinstance(sequence, dict):
- obj = SequenceType(**sequence)
- self.sequence = obj
- else:
- self.sequence = sequence
- if isinstance(timer, dict):
- obj = TimerType(**timer)
- self.timer = obj
- else:
- self.timer = timer
- def factory(*args_, **kwargs_):
- if VirtualNetworkPolicyType.subclass:
- return VirtualNetworkPolicyType.subclass(*args_, **kwargs_)
- else:
- return VirtualNetworkPolicyType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_sequence(self): return self.sequence
- def set_sequence(self, sequence): self.sequence = sequence
- def get_timer(self): return self.timer
- def set_timer(self, timer): self.timer = timer
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.sequence == other.sequence and
- self.timer == other.timer)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_sequence (SequenceType.populate ())
- obj.set_timer (TimerType.populate ())
- return obj
- def export(self, outfile, level=1, namespace_='', name_='VirtualNetworkPolicyType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='VirtualNetworkPolicyType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VirtualNetworkPolicyType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='VirtualNetworkPolicyType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.sequence is not None:
- self.sequence.export(outfile, level, namespace_, name_='sequence', pretty_print=pretty_print)
- if self.timer is not None:
- self.timer.export(outfile, level, namespace_, name_='timer', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.sequence is not None or
- self.timer is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='VirtualNetworkPolicyType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.sequence is not None:
- showIndent(outfile, level)
- outfile.write('sequence=model_.SequenceType(\n')
- self.sequence.exportLiteral(outfile, level, name_='sequence')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.timer is not None:
- showIndent(outfile, level)
- outfile.write('timer=model_.TimerType(\n')
- self.timer.exportLiteral(outfile, level, name_='timer')
- showIndent(outfile, level)
- outfile.write('),\n')
- def exportDict(self, name_='VirtualNetworkPolicyType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'sequence':
- obj_ = SequenceType.factory()
- obj_.build(child_)
- self.set_sequence(obj_)
- elif nodeName_ == 'timer':
- obj_ = TimerType.factory()
- obj_.build(child_)
- self.set_timer(obj_)
-# end class VirtualNetworkPolicyType
-
-
-class AddressType(GeneratedsSuper):
- """
- AddressType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, subnet=None, virtual_network=None, security_group=None, network_policy=None, **kwargs):
- if isinstance(subnet, dict):
- obj = SubnetType(**subnet)
- self.subnet = obj
- else:
- self.subnet = subnet
- self.virtual_network = virtual_network
- self.security_group = security_group
- self.network_policy = network_policy
- def factory(*args_, **kwargs_):
- if AddressType.subclass:
- return AddressType.subclass(*args_, **kwargs_)
- else:
- return AddressType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_subnet(self): return self.subnet
- def set_subnet(self, subnet): self.subnet = subnet
- def get_virtual_network(self): return self.virtual_network
- def set_virtual_network(self, virtual_network): self.virtual_network = virtual_network
- def get_security_group(self): return self.security_group
- def set_security_group(self, security_group): self.security_group = security_group
- def get_network_policy(self): return self.network_policy
- def set_network_policy(self, network_policy): self.network_policy = network_policy
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.subnet == other.subnet and
- self.virtual_network == other.virtual_network and
- self.security_group == other.security_group and
- self.network_policy == other.network_policy)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_subnet (SubnetType.populate ())
- obj.set_virtual_network (obj.populate_string ("virtual_network"))
- obj.set_security_group (obj.populate_string ("security_group"))
- obj.set_network_policy (obj.populate_string ("network_policy"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='AddressType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='AddressType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AddressType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='AddressType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.subnet is not None:
- self.subnet.export(outfile, level, namespace_, name_='subnet', pretty_print=pretty_print)
- if self.virtual_network is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svirtual-network>%s</%svirtual-network>%s' % (namespace_, self.gds_format_string(quote_xml(self.virtual_network).encode(ExternalEncoding), input_name='virtual-network'), namespace_, eol_))
- if self.security_group is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%ssecurity-group>%s</%ssecurity-group>%s' % (namespace_, self.gds_format_string(quote_xml(self.security_group).encode(ExternalEncoding), input_name='security-group'), namespace_, eol_))
- if self.network_policy is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%snetwork-policy>%s</%snetwork-policy>%s' % (namespace_, self.gds_format_string(quote_xml(self.network_policy).encode(ExternalEncoding), input_name='network-policy'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.subnet is not None or
- self.virtual_network is not None or
- self.security_group is not None or
- self.network_policy is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='AddressType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.subnet is not None:
- showIndent(outfile, level)
- outfile.write('subnet=model_.SubnetType(\n')
- self.subnet.exportLiteral(outfile, level, name_='subnet')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.virtual_network is not None:
- showIndent(outfile, level)
- outfile.write('virtual_network=%s,\n' % quote_python(self.virtual_network).encode(ExternalEncoding))
- if self.security_group is not None:
- showIndent(outfile, level)
- outfile.write('security_group=%s,\n' % quote_python(self.security_group).encode(ExternalEncoding))
- if self.network_policy is not None:
- showIndent(outfile, level)
- outfile.write('network_policy=%s,\n' % quote_python(self.network_policy).encode(ExternalEncoding))
- def exportDict(self, name_='AddressType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'subnet':
- obj_ = SubnetType.factory()
- obj_.build(child_)
- self.set_subnet(obj_)
- elif nodeName_ == 'virtual-network':
- virtual_network_ = child_.text
- virtual_network_ = self.gds_validate_string(virtual_network_, node, 'virtual_network')
- self.virtual_network = virtual_network_
- elif nodeName_ == 'security-group':
- security_group_ = child_.text
- security_group_ = self.gds_validate_string(security_group_, node, 'security_group')
- self.security_group = security_group_
- elif nodeName_ == 'network-policy':
- network_policy_ = child_.text
- network_policy_ = self.gds_validate_string(network_policy_, node, 'network_policy')
- self.network_policy = network_policy_
-# end class AddressType
-
-
-class PortType(GeneratedsSuper):
- """
- PortType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, start_port=None, end_port=None, **kwargs):
- self.start_port = start_port
- self.end_port = end_port
- def factory(*args_, **kwargs_):
- if PortType.subclass:
- return PortType.subclass(*args_, **kwargs_)
- else:
- return PortType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_start_port(self): return self.start_port
- def set_start_port(self, start_port): self.start_port = start_port
- def validate_L4PortType(self, value):
- # Validate type L4PortType, a restriction on xsd:integer.
- error = False
- if isinstance(value, list):
- v_int = map(int, value)
- v1, v2 = min(v_int), max(v_int)
- else:
- v1, v2 = int(value), int(value)
- error = (-1 > v1)
- error |= (v2 > 65535)
- if error:
- raise ValueError("L4PortType must be in the range -1-65535")
- def get_end_port(self): return self.end_port
- def set_end_port(self, end_port): self.end_port = end_port
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.start_port == other.start_port and
- self.end_port == other.end_port)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_start_port (obj.populate_integer ("start_port"))
- obj.set_end_port (obj.populate_integer ("end_port"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='PortType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='PortType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='PortType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='PortType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.start_port is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sstart-port>%s</%sstart-port>%s' % (namespace_, self.gds_format_integer(self.start_port, input_name='start-port'), namespace_, eol_))
- if self.end_port is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%send-port>%s</%send-port>%s' % (namespace_, self.gds_format_integer(self.end_port, input_name='end-port'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.start_port is not None or
- self.end_port is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='PortType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.start_port is not None:
- showIndent(outfile, level)
- outfile.write('start_port=%d,\n' % self.start_port)
- if self.end_port is not None:
- showIndent(outfile, level)
- outfile.write('end_port=%d,\n' % self.end_port)
- def exportDict(self, name_='PortType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'start-port':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'start_port')
- self.start_port = ival_
- self.validate_L4PortType(self.start_port) # validate type L4PortType
- elif nodeName_ == 'end-port':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'end_port')
- self.end_port = ival_
- self.validate_L4PortType(self.end_port) # validate type L4PortType
-# end class PortType
-
-
-class MatchConditionType(GeneratedsSuper):
- """
- MatchConditionType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, protocol=None, src_address=None, src_port=None, dst_address=None, dst_port=None, ethertype=None, **kwargs):
- self.protocol = protocol
- if isinstance(src_address, dict):
- obj = AddressType(**src_address)
- self.src_address = obj
- else:
- self.src_address = src_address
- if isinstance(src_port, dict):
- obj = PortType(**src_port)
- self.src_port = obj
- else:
- self.src_port = src_port
- if isinstance(dst_address, dict):
- obj = AddressType(**dst_address)
- self.dst_address = obj
- else:
- self.dst_address = dst_address
- if isinstance(dst_port, dict):
- obj = PortType(**dst_port)
- self.dst_port = obj
- else:
- self.dst_port = dst_port
- self.ethertype = ethertype
- def factory(*args_, **kwargs_):
- if MatchConditionType.subclass:
- return MatchConditionType.subclass(*args_, **kwargs_)
- else:
- return MatchConditionType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_protocol(self): return self.protocol
- def set_protocol(self, protocol): self.protocol = protocol
- def get_src_address(self): return self.src_address
- def set_src_address(self, src_address): self.src_address = src_address
- def get_src_port(self): return self.src_port
- def set_src_port(self, src_port): self.src_port = src_port
- def get_dst_address(self): return self.dst_address
- def set_dst_address(self, dst_address): self.dst_address = dst_address
- def get_dst_port(self): return self.dst_port
- def set_dst_port(self, dst_port): self.dst_port = dst_port
- def get_ethertype(self): return self.ethertype
- def set_ethertype(self, ethertype): self.ethertype = ethertype
- def validate_EtherType(self, value):
- # Validate type EtherType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'IPv4', u'IPv6'])
- else:
- error = value not in [u'IPv4', u'IPv6']
- if error:
- raise ValueError("EtherType must be one of [u'IPv4', u'IPv6']")
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.protocol == other.protocol and
- self.src_address == other.src_address and
- self.src_port == other.src_port and
- self.dst_address == other.dst_address and
- self.dst_port == other.dst_port and
- self.ethertype == other.ethertype)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_protocol (obj.populate_string ("protocol"))
- obj.set_src_address (AddressType.populate ())
- obj.set_src_port (PortType.populate ())
- obj.set_dst_address (AddressType.populate ())
- obj.set_dst_port (PortType.populate ())
- obj.set_ethertype (obj.populate_string ("ethertype"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='MatchConditionType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='MatchConditionType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MatchConditionType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='MatchConditionType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.protocol is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sprotocol>%s</%sprotocol>%s' % (namespace_, self.gds_format_string(quote_xml(self.protocol).encode(ExternalEncoding), input_name='protocol'), namespace_, eol_))
- if self.src_address is not None:
- self.src_address.export(outfile, level, namespace_, name_='src-address', pretty_print=pretty_print)
- if self.src_port is not None:
- self.src_port.export(outfile, level, namespace_, name_='src-port', pretty_print=pretty_print)
- if self.dst_address is not None:
- self.dst_address.export(outfile, level, namespace_, name_='dst-address', pretty_print=pretty_print)
- if self.dst_port is not None:
- self.dst_port.export(outfile, level, namespace_, name_='dst-port', pretty_print=pretty_print)
- if self.ethertype is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sethertype>%s</%sethertype>%s' % (namespace_, self.gds_format_string(quote_xml(self.ethertype).encode(ExternalEncoding), input_name='ethertype'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.protocol is not None or
- self.src_address is not None or
- self.src_port is not None or
- self.dst_address is not None or
- self.dst_port is not None or
- self.ethertype is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='MatchConditionType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.protocol is not None:
- showIndent(outfile, level)
- outfile.write('protocol=%s,\n' % quote_python(self.protocol).encode(ExternalEncoding))
- if self.src_address is not None:
- showIndent(outfile, level)
- outfile.write('src_address=model_.AddressType(\n')
- self.src_address.exportLiteral(outfile, level, name_='src_address')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.src_port is not None:
- showIndent(outfile, level)
- outfile.write('src_port=model_.PortType(\n')
- self.src_port.exportLiteral(outfile, level, name_='src_port')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.dst_address is not None:
- showIndent(outfile, level)
- outfile.write('dst_address=model_.AddressType(\n')
- self.dst_address.exportLiteral(outfile, level, name_='dst_address')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.dst_port is not None:
- showIndent(outfile, level)
- outfile.write('dst_port=model_.PortType(\n')
- self.dst_port.exportLiteral(outfile, level, name_='dst_port')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.ethertype is not None:
- showIndent(outfile, level)
- outfile.write('ethertype=%s,\n' % quote_python(self.ethertype).encode(ExternalEncoding))
- def exportDict(self, name_='MatchConditionType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'protocol':
- protocol_ = child_.text
- protocol_ = self.gds_validate_string(protocol_, node, 'protocol')
- self.protocol = protocol_
- elif nodeName_ == 'src-address':
- obj_ = AddressType.factory()
- obj_.build(child_)
- self.set_src_address(obj_)
- elif nodeName_ == 'src-port':
- obj_ = PortType.factory()
- obj_.build(child_)
- self.set_src_port(obj_)
- elif nodeName_ == 'dst-address':
- obj_ = AddressType.factory()
- obj_.build(child_)
- self.set_dst_address(obj_)
- elif nodeName_ == 'dst-port':
- obj_ = PortType.factory()
- obj_.build(child_)
- self.set_dst_port(obj_)
- elif nodeName_ == 'ethertype':
- ethertype_ = child_.text
- ethertype_ = self.gds_validate_string(ethertype_, node, 'ethertype')
- self.ethertype = ethertype_
- self.validate_EtherType(self.ethertype) # validate type EtherType
-# end class MatchConditionType
-
-
-class MirrorActionType(GeneratedsSuper):
- """
- MirrorActionType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, analyzer_name=None, encapsulation=None, analyzer_ip_address=None, routing_instance=None, udp_port=None, **kwargs):
- self.analyzer_name = analyzer_name
- self.encapsulation = encapsulation
- self.analyzer_ip_address = analyzer_ip_address
- self.routing_instance = routing_instance
- self.udp_port = udp_port
- def factory(*args_, **kwargs_):
- if MirrorActionType.subclass:
- return MirrorActionType.subclass(*args_, **kwargs_)
- else:
- return MirrorActionType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_analyzer_name(self): return self.analyzer_name
- def set_analyzer_name(self, analyzer_name): self.analyzer_name = analyzer_name
- def get_encapsulation(self): return self.encapsulation
- def set_encapsulation(self, encapsulation): self.encapsulation = encapsulation
- def get_analyzer_ip_address(self): return self.analyzer_ip_address
- def set_analyzer_ip_address(self, analyzer_ip_address): self.analyzer_ip_address = analyzer_ip_address
- def validate_IpAddress(self, value):
- # Validate type IpAddress, a restriction on xsd:string.
- pass
- def get_routing_instance(self): return self.routing_instance
- def set_routing_instance(self, routing_instance): self.routing_instance = routing_instance
- def get_udp_port(self): return self.udp_port
- def set_udp_port(self, udp_port): self.udp_port = udp_port
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.analyzer_name == other.analyzer_name and
- self.encapsulation == other.encapsulation and
- self.analyzer_ip_address == other.analyzer_ip_address and
- self.routing_instance == other.routing_instance and
- self.udp_port == other.udp_port)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_analyzer_name (obj.populate_string ("analyzer_name"))
- obj.set_encapsulation (obj.populate_string ("encapsulation"))
- obj.set_analyzer_ip_address (obj.populate_string ("analyzer_ip_address"))
- obj.set_routing_instance (obj.populate_string ("routing_instance"))
- obj.set_udp_port (obj.populate_integer ("udp_port"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='MirrorActionType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='MirrorActionType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MirrorActionType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='MirrorActionType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.analyzer_name is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sanalyzer-name>%s</%sanalyzer-name>%s' % (namespace_, self.gds_format_string(quote_xml(self.analyzer_name).encode(ExternalEncoding), input_name='analyzer-name'), namespace_, eol_))
- if self.encapsulation is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sencapsulation>%s</%sencapsulation>%s' % (namespace_, self.gds_format_string(quote_xml(self.encapsulation).encode(ExternalEncoding), input_name='encapsulation'), namespace_, eol_))
- if self.analyzer_ip_address is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sanalyzer-ip-address>%s</%sanalyzer-ip-address>%s' % (namespace_, self.gds_format_string(quote_xml(self.analyzer_ip_address).encode(ExternalEncoding), input_name='analyzer-ip-address'), namespace_, eol_))
- if self.routing_instance is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%srouting-instance>%s</%srouting-instance>%s' % (namespace_, self.gds_format_string(quote_xml(self.routing_instance).encode(ExternalEncoding), input_name='routing-instance'), namespace_, eol_))
- if self.udp_port is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sudp-port>%s</%sudp-port>%s' % (namespace_, self.gds_format_integer(self.udp_port, input_name='udp-port'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.analyzer_name is not None or
- self.encapsulation is not None or
- self.analyzer_ip_address is not None or
- self.routing_instance is not None or
- self.udp_port is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='MirrorActionType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.analyzer_name is not None:
- showIndent(outfile, level)
- outfile.write('analyzer_name=%s,\n' % quote_python(self.analyzer_name).encode(ExternalEncoding))
- if self.encapsulation is not None:
- showIndent(outfile, level)
- outfile.write('encapsulation=%s,\n' % quote_python(self.encapsulation).encode(ExternalEncoding))
- if self.analyzer_ip_address is not None:
- showIndent(outfile, level)
- outfile.write('analyzer_ip_address=%s,\n' % quote_python(self.analyzer_ip_address).encode(ExternalEncoding))
- if self.routing_instance is not None:
- showIndent(outfile, level)
- outfile.write('routing_instance=%s,\n' % quote_python(self.routing_instance).encode(ExternalEncoding))
- if self.udp_port is not None:
- showIndent(outfile, level)
- outfile.write('udp_port=%d,\n' % self.udp_port)
- def exportDict(self, name_='MirrorActionType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'analyzer-name':
- analyzer_name_ = child_.text
- analyzer_name_ = self.gds_validate_string(analyzer_name_, node, 'analyzer_name')
- self.analyzer_name = analyzer_name_
- elif nodeName_ == 'encapsulation':
- encapsulation_ = child_.text
- encapsulation_ = self.gds_validate_string(encapsulation_, node, 'encapsulation')
- self.encapsulation = encapsulation_
- elif nodeName_ == 'analyzer-ip-address':
- analyzer_ip_address_ = child_.text
- analyzer_ip_address_ = self.gds_validate_string(analyzer_ip_address_, node, 'analyzer_ip_address')
- self.analyzer_ip_address = analyzer_ip_address_
- self.validate_IpAddress(self.analyzer_ip_address) # validate type IpAddress
- elif nodeName_ == 'routing-instance':
- routing_instance_ = child_.text
- routing_instance_ = self.gds_validate_string(routing_instance_, node, 'routing_instance')
- self.routing_instance = routing_instance_
- elif nodeName_ == 'udp-port':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'udp_port')
- self.udp_port = ival_
-# end class MirrorActionType
-
-
-class ActionListType(GeneratedsSuper):
- """
- ActionListType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, simple_action=None, gateway_name=None, apply_service=None, mirror_to=None, assign_routing_instance=None, **kwargs):
- self.simple_action = simple_action
- self.gateway_name = gateway_name
- if (apply_service is None) or (apply_service == []):
- self.apply_service = []
- else:
- self.apply_service = apply_service
- if isinstance(mirror_to, dict):
- obj = MirrorActionType(**mirror_to)
- self.mirror_to = obj
- else:
- self.mirror_to = mirror_to
- self.assign_routing_instance = assign_routing_instance
- def factory(*args_, **kwargs_):
- if ActionListType.subclass:
- return ActionListType.subclass(*args_, **kwargs_)
- else:
- return ActionListType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_simple_action(self): return self.simple_action
- def set_simple_action(self, simple_action): self.simple_action = simple_action
- def validate_SimpleActionType(self, value):
- # Validate type SimpleActionType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'alert', u'drop', u'deny', u'log', u'pass', u'reject'])
- else:
- error = value not in [u'alert', u'drop', u'deny', u'log', u'pass', u'reject']
- if error:
- raise ValueError("SimpleActionType must be one of [u'alert', u'drop', u'deny', u'log', u'pass', u'reject']")
- def get_gateway_name(self): return self.gateway_name
- def set_gateway_name(self, gateway_name): self.gateway_name = gateway_name
- def get_apply_service(self): return self.apply_service
- def set_apply_service(self, apply_service): self.apply_service = apply_service
- def add_apply_service(self, value): self.apply_service.append(value)
- def insert_apply_service(self, index, value): self.apply_service[index] = value
- def delete_apply_service(self, value): self.apply_service.remove(value)
- def get_mirror_to(self): return self.mirror_to
- def set_mirror_to(self, mirror_to): self.mirror_to = mirror_to
- def get_assign_routing_instance(self): return self.assign_routing_instance
- def set_assign_routing_instance(self, assign_routing_instance): self.assign_routing_instance = assign_routing_instance
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.simple_action == other.simple_action and
- self.gateway_name == other.gateway_name and
- self.apply_service == other.apply_service and
- self.mirror_to == other.mirror_to and
- self.assign_routing_instance == other.assign_routing_instance)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_simple_action (obj.populate_string ("simple_action"))
- obj.set_gateway_name (obj.populate_string ("gateway_name"))
- obj.set_apply_service ([obj.populate_string ("apply_service")])
- obj.set_mirror_to (MirrorActionType.populate ())
- obj.set_assign_routing_instance (obj.populate_string ("assign_routing_instance"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='ActionListType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='ActionListType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ActionListType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='ActionListType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.simple_action is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%ssimple-action>%s</%ssimple-action>%s' % (namespace_, self.gds_format_string(quote_xml(self.simple_action).encode(ExternalEncoding), input_name='simple-action'), namespace_, eol_))
- if self.gateway_name is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sgateway-name>%s</%sgateway-name>%s' % (namespace_, self.gds_format_string(quote_xml(self.gateway_name).encode(ExternalEncoding), input_name='gateway-name'), namespace_, eol_))
- for apply_service_ in self.apply_service:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sapply-service>%s</%sapply-service>%s' % (namespace_, self.gds_format_string(quote_xml(apply_service_).encode(ExternalEncoding), input_name='apply-service'), namespace_, eol_))
- if self.mirror_to is not None:
- self.mirror_to.export(outfile, level, namespace_, name_='mirror-to', pretty_print=pretty_print)
- if self.assign_routing_instance is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sassign-routing-instance>%s</%sassign-routing-instance>%s' % (namespace_, self.gds_format_string(quote_xml(self.assign_routing_instance).encode(ExternalEncoding), input_name='assign-routing-instance'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.simple_action is not None or
- self.gateway_name is not None or
- self.apply_service or
- self.mirror_to is not None or
- self.assign_routing_instance is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='ActionListType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.simple_action is not None:
- showIndent(outfile, level)
- outfile.write('simple_action=%s,\n' % quote_python(self.simple_action).encode(ExternalEncoding))
- if self.gateway_name is not None:
- showIndent(outfile, level)
- outfile.write('gateway_name=%s,\n' % quote_python(self.gateway_name).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('apply_service=[\n')
- level += 1
- for apply_service_ in self.apply_service:
- showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(apply_service_).encode(ExternalEncoding))
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- if self.mirror_to is not None:
- showIndent(outfile, level)
- outfile.write('mirror_to=model_.MirrorActionType(\n')
- self.mirror_to.exportLiteral(outfile, level, name_='mirror_to')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.assign_routing_instance is not None:
- showIndent(outfile, level)
- outfile.write('assign_routing_instance=%s,\n' % quote_python(self.assign_routing_instance).encode(ExternalEncoding))
- def exportDict(self, name_='ActionListType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'simple-action':
- simple_action_ = child_.text
- simple_action_ = self.gds_validate_string(simple_action_, node, 'simple_action')
- self.simple_action = simple_action_
- self.validate_SimpleActionType(self.simple_action) # validate type SimpleActionType
- elif nodeName_ == 'gateway-name':
- gateway_name_ = child_.text
- gateway_name_ = self.gds_validate_string(gateway_name_, node, 'gateway_name')
- self.gateway_name = gateway_name_
- elif nodeName_ == 'apply-service':
- apply_service_ = child_.text
- apply_service_ = self.gds_validate_string(apply_service_, node, 'apply_service')
- self.apply_service.append(apply_service_)
- elif nodeName_ == 'mirror-to':
- obj_ = MirrorActionType.factory()
- obj_.build(child_)
- self.set_mirror_to(obj_)
- elif nodeName_ == 'assign-routing-instance':
- assign_routing_instance_ = child_.text
- assign_routing_instance_ = self.gds_validate_string(assign_routing_instance_, node, 'assign_routing_instance')
- self.assign_routing_instance = assign_routing_instance_
-# end class ActionListType
-
-
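A minimal usage sketch for the generated ActionListType above (illustrative only: the constructor keywords are assumed to mirror the child-element names handled by exportChildren/buildChildren, and the import path and values are placeholders, not part of this change):

    import sys
    # from the generated bindings module that defines ActionListType (path illustrative)

    action_list = ActionListType(simple_action='pass',
                                 apply_service=['default-domain:demo:svc-fw'])
    # Serialize to pretty-printed XML on stdout; name_ controls the element tag.
    action_list.export(sys.stdout, level=1, name_='action-list')
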
-class AclRuleType(GeneratedsSuper):
- """
- AclRuleType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, match_condition=None, action_list=None, rule_uuid=None, **kwargs):
- if isinstance(match_condition, dict):
- obj = MatchConditionType(**match_condition)
- self.match_condition = obj
- else:
- self.match_condition = match_condition
- if isinstance(action_list, dict):
- obj = ActionListType(**action_list)
- self.action_list = obj
- else:
- self.action_list = action_list
- self.rule_uuid = rule_uuid
- def factory(*args_, **kwargs_):
- if AclRuleType.subclass:
- return AclRuleType.subclass(*args_, **kwargs_)
- else:
- return AclRuleType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_match_condition(self): return self.match_condition
- def set_match_condition(self, match_condition): self.match_condition = match_condition
- def get_action_list(self): return self.action_list
- def set_action_list(self, action_list): self.action_list = action_list
- def get_rule_uuid(self): return self.rule_uuid
- def set_rule_uuid(self, rule_uuid): self.rule_uuid = rule_uuid
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.match_condition == other.match_condition and
- self.action_list == other.action_list and
- self.rule_uuid == other.rule_uuid)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_match_condition (MatchConditionType.populate ())
- obj.set_action_list (ActionListType.populate ())
- obj.set_rule_uuid (obj.populate_string ("rule_uuid"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='AclRuleType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='AclRuleType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AclRuleType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='AclRuleType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.match_condition is not None:
- self.match_condition.export(outfile, level, namespace_, name_='match-condition', pretty_print=pretty_print)
- if self.action_list is not None:
- self.action_list.export(outfile, level, namespace_, name_='action-list', pretty_print=pretty_print)
- if self.rule_uuid is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%srule-uuid>%s</%srule-uuid>%s' % (namespace_, self.gds_format_string(quote_xml(self.rule_uuid).encode(ExternalEncoding), input_name='rule-uuid'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.match_condition is not None or
- self.action_list is not None or
- self.rule_uuid is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='AclRuleType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.match_condition is not None:
- showIndent(outfile, level)
- outfile.write('match_condition=model_.MatchConditionType(\n')
- self.match_condition.exportLiteral(outfile, level, name_='match_condition')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.action_list is not None:
- showIndent(outfile, level)
- outfile.write('action_list=model_.ActionListType(\n')
- self.action_list.exportLiteral(outfile, level, name_='action_list')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.rule_uuid is not None:
- showIndent(outfile, level)
- outfile.write('rule_uuid=%s,\n' % quote_python(self.rule_uuid).encode(ExternalEncoding))
- def exportDict(self, name_='AclRuleType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'match-condition':
- obj_ = MatchConditionType.factory()
- obj_.build(child_)
- self.set_match_condition(obj_)
- elif nodeName_ == 'action-list':
- obj_ = ActionListType.factory()
- obj_.build(child_)
- self.set_action_list(obj_)
- elif nodeName_ == 'rule-uuid':
- rule_uuid_ = child_.text
- rule_uuid_ = self.gds_validate_string(rule_uuid_, node, 'rule_uuid')
- self.rule_uuid = rule_uuid_
-# end class AclRuleType
-
-
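A minimal sketch of AclRuleType construction, showing the dict-to-object coercion in the __init__ above (the simple_action keyword for ActionListType is an assumption; the UUID is a placeholder):

    rule = AclRuleType(rule_uuid='00000000-0000-0000-0000-000000000001',
                       action_list={'simple_action': 'deny'})
    # The dict passed as action_list is coerced into an ActionListType instance.
    assert isinstance(rule.get_action_list(), ActionListType)
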
-class AclEntriesType(GeneratedsSuper):
- """
- AclEntriesType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, dynamic=None, acl_rule=None, **kwargs):
- self.dynamic = dynamic
- if (acl_rule is None) or (acl_rule == []):
- self.acl_rule = []
- else:
- if isinstance(acl_rule[0], dict):
- objs = [AclRuleType(**elem) for elem in acl_rule]
- self.acl_rule = objs
- else:
- self.acl_rule = acl_rule
- def factory(*args_, **kwargs_):
- if AclEntriesType.subclass:
- return AclEntriesType.subclass(*args_, **kwargs_)
- else:
- return AclEntriesType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_dynamic(self): return self.dynamic
- def set_dynamic(self, dynamic): self.dynamic = dynamic
- def get_acl_rule(self): return self.acl_rule
- def set_acl_rule(self, acl_rule): self.acl_rule = acl_rule
- def add_acl_rule(self, value): self.acl_rule.append(value)
- def insert_acl_rule(self, index, value): self.acl_rule[index] = value
- def delete_acl_rule(self, value): self.acl_rule.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.dynamic == other.dynamic and
- self.acl_rule == other.acl_rule)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_dynamic (obj.populate_boolean ("dynamic"))
- obj.set_acl_rule ([AclRuleType.populate ()])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='AclEntriesType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='AclEntriesType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AclEntriesType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='AclEntriesType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.dynamic is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sdynamic>%s</%sdynamic>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.dynamic)), input_name='dynamic'), namespace_, eol_))
- for acl_rule_ in self.acl_rule:
- if isinstance(acl_rule_, dict):
- acl_rule_ = AclRuleType(**acl_rule_)
- acl_rule_.export(outfile, level, namespace_, name_='acl-rule', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.dynamic is not None or
- self.acl_rule
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='AclEntriesType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.dynamic is not None:
- showIndent(outfile, level)
- outfile.write('dynamic=%s,\n' % self.dynamic)
- showIndent(outfile, level)
- outfile.write('acl_rule=[\n')
- level += 1
- for acl_rule_ in self.acl_rule:
- showIndent(outfile, level)
- outfile.write('model_.AclRuleType(\n')
- acl_rule_.exportLiteral(outfile, level, name_='AclRuleType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='AclEntriesType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'dynamic':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'dynamic')
- self.dynamic = ival_
- elif nodeName_ == 'acl-rule':
- obj_ = AclRuleType.factory()
- obj_.build(child_)
- self.acl_rule.append(obj_)
-# end class AclEntriesType
-
-
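A sketch of populating AclEntriesType from XML via build(), which dispatches on the child tag names ('dynamic', 'acl-rule') exactly as in buildChildren above; the XML snippet is illustrative:

    from xml.etree import cElementTree as ET

    xml_doc = (
        '<acl-entries>'
        '<dynamic>false</dynamic>'
        '<acl-rule><rule-uuid>00000000-0000-0000-0000-000000000002</rule-uuid></acl-rule>'
        '</acl-entries>')
    entries = AclEntriesType.factory()
    entries.build(ET.fromstring(xml_doc))
    assert entries.get_dynamic() is False       # 'false' is parsed into a boolean
    assert len(entries.get_acl_rule()) == 1     # one AclRuleType built from <acl-rule>
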
-class PolicyRuleType(GeneratedsSuper):
- """
- PolicyRuleType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, rule_sequence=None, rule_uuid=None, direction=None, protocol=None, src_addresses=None, src_ports=None, application=None, dst_addresses=None, dst_ports=None, action_list=None, ethertype=None, **kwargs):
- if isinstance(rule_sequence, dict):
- obj = SequenceType(**rule_sequence)
- self.rule_sequence = obj
- else:
- self.rule_sequence = rule_sequence
- self.rule_uuid = rule_uuid
- self.direction = direction
- self.protocol = protocol
- if (src_addresses is None) or (src_addresses == []):
- self.src_addresses = []
- else:
- if isinstance(src_addresses[0], dict):
- objs = [AddressType(**elem) for elem in src_addresses]
- self.src_addresses = objs
- else:
- self.src_addresses = src_addresses
- if (src_ports is None) or (src_ports == []):
- self.src_ports = []
- else:
- if isinstance(src_ports[0], dict):
- objs = [PortType(**elem) for elem in src_ports]
- self.src_ports = objs
- else:
- self.src_ports = src_ports
- if (application is None) or (application == []):
- self.application = []
- else:
- self.application = application
- if (dst_addresses is None) or (dst_addresses == []):
- self.dst_addresses = []
- else:
- if isinstance(dst_addresses[0], dict):
- objs = [AddressType(**elem) for elem in dst_addresses]
- self.dst_addresses = objs
- else:
- self.dst_addresses = dst_addresses
- if (dst_ports is None) or (dst_ports == []):
- self.dst_ports = []
- else:
- if isinstance(dst_ports[0], dict):
- objs = [PortType(**elem) for elem in dst_ports]
- self.dst_ports = objs
- else:
- self.dst_ports = dst_ports
- if isinstance(action_list, dict):
- obj = ActionListType(**action_list)
- self.action_list = obj
- else:
- self.action_list = action_list
- self.ethertype = ethertype
- def factory(*args_, **kwargs_):
- if PolicyRuleType.subclass:
- return PolicyRuleType.subclass(*args_, **kwargs_)
- else:
- return PolicyRuleType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_rule_sequence(self): return self.rule_sequence
- def set_rule_sequence(self, rule_sequence): self.rule_sequence = rule_sequence
- def get_rule_uuid(self): return self.rule_uuid
- def set_rule_uuid(self, rule_uuid): self.rule_uuid = rule_uuid
- def get_direction(self): return self.direction
- def set_direction(self, direction): self.direction = direction
- def validate_DirectionType(self, value):
- # Validate type DirectionType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'>', u'<>'])
- else:
- error = value not in [u'>', u'<>']
- if error:
- raise ValueError("DirectionType must be one of [u'>', u'<>']")
- def get_protocol(self): return self.protocol
- def set_protocol(self, protocol): self.protocol = protocol
- def validate_ProtocolType(self, value):
- # Validate type ProtocolType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'tcp', u'udp', u'icmp', u'any', u'1', u'6', u'17'])
- else:
- error = value not in [u'tcp', u'udp', u'icmp', u'any', u'1', u'6', u'17']
- if error:
- raise ValueError("ProtocolType must be one of [u'tcp', u'udp', u'icmp', u'any', u'1', u'6', u'17']")
- def get_src_addresses(self): return self.src_addresses
- def set_src_addresses(self, src_addresses): self.src_addresses = src_addresses
- def add_src_addresses(self, value): self.src_addresses.append(value)
- def insert_src_addresses(self, index, value): self.src_addresses[index] = value
- def delete_src_addresses(self, value): self.src_addresses.remove(value)
- def get_src_ports(self): return self.src_ports
- def set_src_ports(self, src_ports): self.src_ports = src_ports
- def add_src_ports(self, value): self.src_ports.append(value)
- def insert_src_ports(self, index, value): self.src_ports[index] = value
- def delete_src_ports(self, value): self.src_ports.remove(value)
- def get_application(self): return self.application
- def set_application(self, application): self.application = application
- def add_application(self, value): self.application.append(value)
- def insert_application(self, index, value): self.application[index] = value
- def delete_application(self, value): self.application.remove(value)
- def get_dst_addresses(self): return self.dst_addresses
- def set_dst_addresses(self, dst_addresses): self.dst_addresses = dst_addresses
- def add_dst_addresses(self, value): self.dst_addresses.append(value)
- def insert_dst_addresses(self, index, value): self.dst_addresses[index] = value
- def delete_dst_addresses(self, value): self.dst_addresses.remove(value)
- def get_dst_ports(self): return self.dst_ports
- def set_dst_ports(self, dst_ports): self.dst_ports = dst_ports
- def add_dst_ports(self, value): self.dst_ports.append(value)
- def insert_dst_ports(self, index, value): self.dst_ports[index] = value
- def delete_dst_ports(self, value): self.dst_ports.remove(value)
- def get_action_list(self): return self.action_list
- def set_action_list(self, action_list): self.action_list = action_list
- def get_ethertype(self): return self.ethertype
- def set_ethertype(self, ethertype): self.ethertype = ethertype
- def validate_EtherType(self, value):
- # Validate type EtherType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'IPv4', u'IPv6'])
- else:
- error = value not in [u'IPv4', u'IPv6']
- if error:
- raise ValueError("EtherType must be one of [u'IPv4', u'IPv6']")
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.rule_sequence == other.rule_sequence and
- self.rule_uuid == other.rule_uuid and
- self.direction == other.direction and
- self.protocol == other.protocol and
- self.src_addresses == other.src_addresses and
- self.src_ports == other.src_ports and
- self.application == other.application and
- self.dst_addresses == other.dst_addresses and
- self.dst_ports == other.dst_ports and
- self.action_list == other.action_list and
- self.ethertype == other.ethertype)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_rule_sequence (SequenceType.populate ())
- obj.set_rule_uuid (obj.populate_string ("rule_uuid"))
- obj.set_direction (obj.populate_string ("direction"))
- obj.set_protocol (obj.populate_string ("protocol"))
- obj.set_src_addresses ([AddressType.populate ()])
- obj.set_src_ports ([PortType.populate ()])
- obj.set_application ([obj.populate_string ("application")])
- obj.set_dst_addresses ([AddressType.populate ()])
- obj.set_dst_ports ([PortType.populate ()])
- obj.set_action_list (ActionListType.populate ())
- obj.set_ethertype (obj.populate_string ("ethertype"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='PolicyRuleType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='PolicyRuleType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='PolicyRuleType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='PolicyRuleType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.rule_sequence is not None:
- self.rule_sequence.export(outfile, level, namespace_, name_='rule-sequence', pretty_print=pretty_print)
- if self.rule_uuid is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%srule-uuid>%s</%srule-uuid>%s' % (namespace_, self.gds_format_string(quote_xml(self.rule_uuid).encode(ExternalEncoding), input_name='rule-uuid'), namespace_, eol_))
- if self.direction is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sdirection>%s</%sdirection>%s' % (namespace_, self.gds_format_string(quote_xml(self.direction).encode(ExternalEncoding), input_name='direction'), namespace_, eol_))
- if self.protocol is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sprotocol>%s</%sprotocol>%s' % (namespace_, self.gds_format_string(quote_xml(self.protocol).encode(ExternalEncoding), input_name='protocol'), namespace_, eol_))
- for src_addresses_ in self.src_addresses:
- if isinstance(src_addresses_, dict):
- src_addresses_ = AddressType(**src_addresses_)
- src_addresses_.export(outfile, level, namespace_, name_='src-addresses', pretty_print=pretty_print)
- for src_ports_ in self.src_ports:
- if isinstance(src_ports_, dict):
- src_ports_ = PortType(**src_ports_)
- src_ports_.export(outfile, level, namespace_, name_='src-ports', pretty_print=pretty_print)
- for application_ in self.application:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sapplication>%s</%sapplication>%s' % (namespace_, self.gds_format_string(quote_xml(application_).encode(ExternalEncoding), input_name='application'), namespace_, eol_))
- for dst_addresses_ in self.dst_addresses:
- if isinstance(dst_addresses_, dict):
- dst_addresses_ = AddressType(**dst_addresses_)
- dst_addresses_.export(outfile, level, namespace_, name_='dst-addresses', pretty_print=pretty_print)
- for dst_ports_ in self.dst_ports:
- if isinstance(dst_ports_, dict):
- dst_ports_ = PortType(**dst_ports_)
- dst_ports_.export(outfile, level, namespace_, name_='dst-ports', pretty_print=pretty_print)
- if self.action_list is not None:
- self.action_list.export(outfile, level, namespace_, name_='action-list', pretty_print=pretty_print)
- if self.ethertype is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sethertype>%s</%sethertype>%s' % (namespace_, self.gds_format_string(quote_xml(self.ethertype).encode(ExternalEncoding), input_name='ethertype'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.rule_sequence is not None or
- self.rule_uuid is not None or
- self.direction is not None or
- self.protocol is not None or
- self.src_addresses or
- self.src_ports or
- self.application or
- self.dst_addresses or
- self.dst_ports or
- self.action_list is not None or
- self.ethertype is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='PolicyRuleType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.rule_sequence is not None:
- showIndent(outfile, level)
- outfile.write('rule_sequence=model_.SequenceType(\n')
- self.rule_sequence.exportLiteral(outfile, level, name_='rule_sequence')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.rule_uuid is not None:
- showIndent(outfile, level)
- outfile.write('rule_uuid=%s,\n' % quote_python(self.rule_uuid).encode(ExternalEncoding))
- if self.direction is not None:
- showIndent(outfile, level)
- outfile.write('direction=%s,\n' % quote_python(self.direction).encode(ExternalEncoding))
- if self.protocol is not None:
- showIndent(outfile, level)
- outfile.write('protocol=%s,\n' % quote_python(self.protocol).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('src_addresses=[\n')
- level += 1
- for src_addresses_ in self.src_addresses:
- showIndent(outfile, level)
- outfile.write('model_.AddressType(\n')
- src_addresses_.exportLiteral(outfile, level, name_='AddressType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('src_ports=[\n')
- level += 1
- for src_ports_ in self.src_ports:
- showIndent(outfile, level)
- outfile.write('model_.PortType(\n')
- src_ports_.exportLiteral(outfile, level, name_='PortType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('application=[\n')
- level += 1
- for application_ in self.application:
- showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(application_).encode(ExternalEncoding))
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('dst_addresses=[\n')
- level += 1
- for dst_addresses_ in self.dst_addresses:
- showIndent(outfile, level)
- outfile.write('model_.AddressType(\n')
- dst_addresses_.exportLiteral(outfile, level, name_='AddressType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('dst_ports=[\n')
- level += 1
- for dst_ports_ in self.dst_ports:
- showIndent(outfile, level)
- outfile.write('model_.PortType(\n')
- dst_ports_.exportLiteral(outfile, level, name_='PortType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- if self.action_list is not None:
- showIndent(outfile, level)
- outfile.write('action_list=model_.ActionListType(\n')
- self.action_list.exportLiteral(outfile, level, name_='action_list')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.ethertype is not None:
- showIndent(outfile, level)
- outfile.write('ethertype=%s,\n' % quote_python(self.ethertype).encode(ExternalEncoding))
- def exportDict(self, name_='PolicyRuleType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'rule-sequence':
- obj_ = SequenceType.factory()
- obj_.build(child_)
- self.set_rule_sequence(obj_)
- elif nodeName_ == 'rule-uuid':
- rule_uuid_ = child_.text
- rule_uuid_ = self.gds_validate_string(rule_uuid_, node, 'rule_uuid')
- self.rule_uuid = rule_uuid_
- elif nodeName_ == 'direction':
- direction_ = child_.text
- direction_ = self.gds_validate_string(direction_, node, 'direction')
- self.direction = direction_
- self.validate_DirectionType(self.direction) # validate type DirectionType
- elif nodeName_ == 'protocol':
- protocol_ = child_.text
- protocol_ = self.gds_validate_string(protocol_, node, 'protocol')
- self.protocol = protocol_
- self.validate_ProtocolType(self.protocol) # validate type ProtocolType
- elif nodeName_ == 'src-addresses':
- obj_ = AddressType.factory()
- obj_.build(child_)
- self.src_addresses.append(obj_)
- elif nodeName_ == 'src-ports':
- obj_ = PortType.factory()
- obj_.build(child_)
- self.src_ports.append(obj_)
- elif nodeName_ == 'application':
- application_ = child_.text
- application_ = self.gds_validate_string(application_, node, 'application')
- self.application.append(application_)
- elif nodeName_ == 'dst-addresses':
- obj_ = AddressType.factory()
- obj_.build(child_)
- self.dst_addresses.append(obj_)
- elif nodeName_ == 'dst-ports':
- obj_ = PortType.factory()
- obj_.build(child_)
- self.dst_ports.append(obj_)
- elif nodeName_ == 'action-list':
- obj_ = ActionListType.factory()
- obj_.build(child_)
- self.set_action_list(obj_)
- elif nodeName_ == 'ethertype':
- ethertype_ = child_.text
- ethertype_ = self.gds_validate_string(ethertype_, node, 'ethertype')
- self.ethertype = ethertype_
- self.validate_EtherType(self.ethertype) # validate type EtherType
-# end class PolicyRuleType
-
-
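A sketch of the PolicyRuleType enum validators above; the field values are placeholders:

    rule = PolicyRuleType(direction='<>', protocol='tcp', ethertype='IPv4')
    rule.validate_DirectionType(rule.get_direction())   # accepted: '>' or '<>'
    rule.validate_ProtocolType(rule.get_protocol())     # accepted: tcp/udp/icmp/any/1/6/17
    try:
        rule.validate_ProtocolType('sctp')               # not in the allowed set
    except ValueError:
        pass                                             # "ProtocolType must be one of ..."
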
-class PolicyEntriesType(GeneratedsSuper):
- """
- PolicyEntriesType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, policy_rule=None, **kwargs):
- if (policy_rule is None) or (policy_rule == []):
- self.policy_rule = []
- else:
- if isinstance(policy_rule[0], dict):
- objs = [PolicyRuleType(**elem) for elem in policy_rule]
- self.policy_rule = objs
- else:
- self.policy_rule = policy_rule
- def factory(*args_, **kwargs_):
- if PolicyEntriesType.subclass:
- return PolicyEntriesType.subclass(*args_, **kwargs_)
- else:
- return PolicyEntriesType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_policy_rule(self): return self.policy_rule
- def set_policy_rule(self, policy_rule): self.policy_rule = policy_rule
- def add_policy_rule(self, value): self.policy_rule.append(value)
- def insert_policy_rule(self, index, value): self.policy_rule[index] = value
- def delete_policy_rule(self, value): self.policy_rule.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.policy_rule == other.policy_rule)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_policy_rule ([PolicyRuleType.populate ()])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='PolicyEntriesType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='PolicyEntriesType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='PolicyEntriesType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='PolicyEntriesType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for policy_rule_ in self.policy_rule:
- if isinstance(policy_rule_, dict):
- policy_rule_ = PolicyRuleType(**policy_rule_)
- policy_rule_.export(outfile, level, namespace_, name_='policy-rule', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.policy_rule
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='PolicyEntriesType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('policy_rule=[\n')
- level += 1
- for policy_rule_ in self.policy_rule:
- showIndent(outfile, level)
- outfile.write('model_.PolicyRuleType(\n')
- policy_rule_.exportLiteral(outfile, level, name_='PolicyRuleType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='PolicyEntriesType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'policy-rule':
- obj_ = PolicyRuleType.factory()
- obj_.build(child_)
- self.policy_rule.append(obj_)
-# end class PolicyEntriesType
-
-
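A sketch of round-tripping PolicyEntriesType to a plain dict with exportDict(); nested dicts are coerced into PolicyRuleType/ActionListType objects as in the constructors above (keyword names assumed from those constructors, values are placeholders):

    entries = PolicyEntriesType(policy_rule=[
        {'direction': '>', 'protocol': 'udp',
         'action_list': {'simple_action': 'pass'}},
    ])
    # exportDict() serializes the object graph through JSON and back into
    # a plain dict keyed by the type name.
    d = entries.exportDict(name_='PolicyEntriesType')
    assert 'PolicyEntriesType' in d
    assert d['PolicyEntriesType']['policy_rule'][0]['protocol'] == 'udp'
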
-class ApiAccessType(GeneratedsSuper):
- """
- ApiAccessType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, api_name=None, permissions=None, **kwargs):
- self.api_name = api_name
- if isinstance(permissions, dict):
- obj = PermType(**permissions)
- self.permissions = obj
- else:
- self.permissions = permissions
- def factory(*args_, **kwargs_):
- if ApiAccessType.subclass:
- return ApiAccessType.subclass(*args_, **kwargs_)
- else:
- return ApiAccessType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_api_name(self): return self.api_name
- def set_api_name(self, api_name): self.api_name = api_name
- def get_permissions(self): return self.permissions
- def set_permissions(self, permissions): self.permissions = permissions
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.api_name == other.api_name and
- self.permissions == other.permissions)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_api_name (obj.populate_string ("api_name"))
- obj.set_permissions (PermType.populate ())
- return obj
- def export(self, outfile, level=1, namespace_='', name_='ApiAccessType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='ApiAccessType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ApiAccessType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='ApiAccessType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.api_name is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sapi-name>%s</%sapi-name>%s' % (namespace_, self.gds_format_string(quote_xml(self.api_name).encode(ExternalEncoding), input_name='api-name'), namespace_, eol_))
- if self.permissions is not None:
- self.permissions.export(outfile, level, namespace_, name_='permissions', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.api_name is not None or
- self.permissions is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='ApiAccessType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.api_name is not None:
- showIndent(outfile, level)
- outfile.write('api_name=%s,\n' % quote_python(self.api_name).encode(ExternalEncoding))
- if self.permissions is not None:
- showIndent(outfile, level)
- outfile.write('permissions=model_.PermType(\n')
- self.permissions.exportLiteral(outfile, level, name_='permissions')
- showIndent(outfile, level)
- outfile.write('),\n')
- def exportDict(self, name_='ApiAccessType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'api-name':
- api_name_ = child_.text
- api_name_ = self.gds_validate_string(api_name_, node, 'api_name')
- self.api_name = api_name_
- elif nodeName_ == 'permissions':
- obj_ = PermType.factory()
- obj_.build(child_)
- self.set_permissions(obj_)
-# end class ApiAccessType
-
-
-class ApiAccessListType(GeneratedsSuper):
- """
- ApiAccessListType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, api_access=None, **kwargs):
- if (api_access is None) or (api_access == []):
- self.api_access = []
- else:
- if isinstance(api_access[0], dict):
- objs = [ApiAccessType(**elem) for elem in api_access]
- self.api_access = objs
- else:
- self.api_access = api_access
- def factory(*args_, **kwargs_):
- if ApiAccessListType.subclass:
- return ApiAccessListType.subclass(*args_, **kwargs_)
- else:
- return ApiAccessListType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_api_access(self): return self.api_access
- def set_api_access(self, api_access): self.api_access = api_access
- def add_api_access(self, value): self.api_access.append(value)
- def insert_api_access(self, index, value): self.api_access[index] = value
- def delete_api_access(self, value): self.api_access.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.api_access == other.api_access)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_api_access ([ApiAccessType.populate ()])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='ApiAccessListType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='ApiAccessListType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ApiAccessListType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='ApiAccessListType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for api_access_ in self.api_access:
- if isinstance(api_access_, dict):
- api_access_ = ApiAccessType(**api_access_)
- api_access_.export(outfile, level, namespace_, name_='api-access', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.api_access
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='ApiAccessListType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('api_access=[\n')
- level += 1
- for api_access_ in self.api_access:
- showIndent(outfile, level)
- outfile.write('model_.ApiAccessType(\n')
- api_access_.exportLiteral(outfile, level, name_='ApiAccessType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='ApiAccessListType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'api-access':
- obj_ = ApiAccessType.factory()
- obj_.build(child_)
- self.api_access.append(obj_)
-# end class ApiAccessListType
-
-
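A sketch of the ApiAccessListType list helpers above (add/get/delete); the api_name values are placeholders:

    acl = ApiAccessListType()
    acl.add_api_access(ApiAccessType(api_name='virtual-network'))
    acl.add_api_access(ApiAccessType(api_name='network-policy'))
    assert len(acl.get_api_access()) == 2
    acl.delete_api_access(acl.get_api_access()[0])   # removes by value (uses __eq__)
    assert acl.get_api_access()[0].get_api_name() == 'network-policy'
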
-class DhcpOptionType(GeneratedsSuper):
- """
- DhcpOptionType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, dhcp_option_name=None, dhcp_option_value=None, **kwargs):
- self.dhcp_option_name = dhcp_option_name
- self.dhcp_option_value = dhcp_option_value
- def factory(*args_, **kwargs_):
- if DhcpOptionType.subclass:
- return DhcpOptionType.subclass(*args_, **kwargs_)
- else:
- return DhcpOptionType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_dhcp_option_name(self): return self.dhcp_option_name
- def set_dhcp_option_name(self, dhcp_option_name): self.dhcp_option_name = dhcp_option_name
- def get_dhcp_option_value(self): return self.dhcp_option_value
- def set_dhcp_option_value(self, dhcp_option_value): self.dhcp_option_value = dhcp_option_value
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.dhcp_option_name == other.dhcp_option_name and
- self.dhcp_option_value == other.dhcp_option_value)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_dhcp_option_name (obj.populate_string ("dhcp_option_name"))
- obj.set_dhcp_option_value (obj.populate_string ("dhcp_option_value"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='DhcpOptionType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='DhcpOptionType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DhcpOptionType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='DhcpOptionType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.dhcp_option_name is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sdhcp-option-name>%s</%sdhcp-option-name>%s' % (namespace_, self.gds_format_string(quote_xml(self.dhcp_option_name).encode(ExternalEncoding), input_name='dhcp-option-name'), namespace_, eol_))
- if self.dhcp_option_value is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sdhcp-option-value>%s</%sdhcp-option-value>%s' % (namespace_, self.gds_format_string(quote_xml(self.dhcp_option_value).encode(ExternalEncoding), input_name='dhcp-option-value'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.dhcp_option_name is not None or
- self.dhcp_option_value is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='DhcpOptionType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.dhcp_option_name is not None:
- showIndent(outfile, level)
- outfile.write('dhcp_option_name=%s,\n' % quote_python(self.dhcp_option_name).encode(ExternalEncoding))
- if self.dhcp_option_value is not None:
- showIndent(outfile, level)
- outfile.write('dhcp_option_value=%s,\n' % quote_python(self.dhcp_option_value).encode(ExternalEncoding))
- def exportDict(self, name_='DhcpOptionType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'dhcp-option-name':
- dhcp_option_name_ = child_.text
- dhcp_option_name_ = self.gds_validate_string(dhcp_option_name_, node, 'dhcp_option_name')
- self.dhcp_option_name = dhcp_option_name_
- elif nodeName_ == 'dhcp-option-value':
- dhcp_option_value_ = child_.text
- dhcp_option_value_ = self.gds_validate_string(dhcp_option_value_, node, 'dhcp_option_value')
- self.dhcp_option_value = dhcp_option_value_
-# end class DhcpOptionType
-
-
-class DhcpOptionsListType(GeneratedsSuper):
- """
- DhcpOptionsListType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, dhcp_option=None, **kwargs):
- if (dhcp_option is None) or (dhcp_option == []):
- self.dhcp_option = []
- else:
- if isinstance(dhcp_option[0], dict):
- objs = [DhcpOptionType(**elem) for elem in dhcp_option]
- self.dhcp_option = objs
- else:
- self.dhcp_option = dhcp_option
- def factory(*args_, **kwargs_):
- if DhcpOptionsListType.subclass:
- return DhcpOptionsListType.subclass(*args_, **kwargs_)
- else:
- return DhcpOptionsListType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_dhcp_option(self): return self.dhcp_option
- def set_dhcp_option(self, dhcp_option): self.dhcp_option = dhcp_option
- def add_dhcp_option(self, value): self.dhcp_option.append(value)
- def insert_dhcp_option(self, index, value): self.dhcp_option[index] = value
- def delete_dhcp_option(self, value): self.dhcp_option.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.dhcp_option == other.dhcp_option)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_dhcp_option ([DhcpOptionType.populate ()])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='DhcpOptionsListType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='DhcpOptionsListType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DhcpOptionsListType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='DhcpOptionsListType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for dhcp_option_ in self.dhcp_option:
- if isinstance(dhcp_option_, dict):
- dhcp_option_ = DhcpOptionType(**dhcp_option_)
- dhcp_option_.export(outfile, level, namespace_, name_='dhcp-option', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.dhcp_option
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='DhcpOptionsListType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('dhcp_option=[\n')
- level += 1
- for dhcp_option_ in self.dhcp_option:
- showIndent(outfile, level)
- outfile.write('model_.DhcpOptionType(\n')
- dhcp_option_.exportLiteral(outfile, level, name_='DhcpOptionType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='DhcpOptionsListType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'dhcp-option':
- obj_ = DhcpOptionType.factory()
- obj_.build(child_)
- self.dhcp_option.append(obj_)
-# end class DhcpOptionsListType
-
-
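A sketch of building a DhcpOptionsListType from plain dicts, which the constructor above coerces into DhcpOptionType elements, then exporting it as XML; the option name/value pair is a placeholder:

    import sys

    opts = DhcpOptionsListType(dhcp_option=[
        {'dhcp_option_name': '6', 'dhcp_option_value': '8.8.8.8'},
    ])
    assert isinstance(opts.get_dhcp_option()[0], DhcpOptionType)
    opts.export(sys.stdout, name_='dhcp-option-list')   # <dhcp-option-list>...</dhcp-option-list>
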
-class IpamDnsAddressType(GeneratedsSuper):
- """
- IpamDnsAddressType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, tenant_dns_server_address=None, virtual_dns_server_name=None, **kwargs):
- if isinstance(tenant_dns_server_address, dict):
- obj = IpAddressesType(**tenant_dns_server_address)
- self.tenant_dns_server_address = obj
- else:
- self.tenant_dns_server_address = tenant_dns_server_address
- self.virtual_dns_server_name = virtual_dns_server_name
- def factory(*args_, **kwargs_):
- if IpamDnsAddressType.subclass:
- return IpamDnsAddressType.subclass(*args_, **kwargs_)
- else:
- return IpamDnsAddressType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_tenant_dns_server_address(self): return self.tenant_dns_server_address
- def set_tenant_dns_server_address(self, tenant_dns_server_address): self.tenant_dns_server_address = tenant_dns_server_address
- def get_virtual_dns_server_name(self): return self.virtual_dns_server_name
- def set_virtual_dns_server_name(self, virtual_dns_server_name): self.virtual_dns_server_name = virtual_dns_server_name
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.tenant_dns_server_address == other.tenant_dns_server_address and
- self.virtual_dns_server_name == other.virtual_dns_server_name)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_tenant_dns_server_address (IpAddressesType.populate ())
- obj.set_virtual_dns_server_name (obj.populate_string ("virtual_dns_server_name"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='IpamDnsAddressType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='IpamDnsAddressType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IpamDnsAddressType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='IpamDnsAddressType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.tenant_dns_server_address is not None:
- self.tenant_dns_server_address.export(outfile, level, namespace_, name_='tenant-dns-server-address', pretty_print=pretty_print)
- if self.virtual_dns_server_name is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svirtual-dns-server-name>%s</%svirtual-dns-server-name>%s' % (namespace_, self.gds_format_string(quote_xml(self.virtual_dns_server_name).encode(ExternalEncoding), input_name='virtual-dns-server-name'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.tenant_dns_server_address is not None or
- self.virtual_dns_server_name is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='IpamDnsAddressType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.tenant_dns_server_address is not None:
- showIndent(outfile, level)
- outfile.write('tenant_dns_server_address=model_.IpAddressesType(\n')
- self.tenant_dns_server_address.exportLiteral(outfile, level, name_='tenant_dns_server_address')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.virtual_dns_server_name is not None:
- showIndent(outfile, level)
- outfile.write('virtual_dns_server_name=%s,\n' % quote_python(self.virtual_dns_server_name).encode(ExternalEncoding))
- def exportDict(self, name_='IpamDnsAddressType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'tenant-dns-server-address':
- obj_ = IpAddressesType.factory()
- obj_.build(child_)
- self.set_tenant_dns_server_address(obj_)
- elif nodeName_ == 'virtual-dns-server-name':
- virtual_dns_server_name_ = child_.text
- virtual_dns_server_name_ = self.gds_validate_string(virtual_dns_server_name_, node, 'virtual_dns_server_name')
- self.virtual_dns_server_name = virtual_dns_server_name_
-# end class IpamDnsAddressType
-
-
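A sketch of the value-based equality defined on IpamDnsAddressType above (__eq__/__ne__ compare field values, not object identity); the server names are placeholders:

    a = IpamDnsAddressType(virtual_dns_server_name='default-domain:demo:vdns')
    b = IpamDnsAddressType(virtual_dns_server_name='default-domain:demo:vdns')
    assert a == b                 # equal field values compare equal
    assert a is not b             # ...even though they are distinct objects
    assert a != IpamDnsAddressType(virtual_dns_server_name='other')
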
-class IpamType(GeneratedsSuper):
- """
- IpamType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, ipam_method=None, ipam_dns_method=None, ipam_dns_server=None, dhcp_option_list=None, cidr_block=None, host_routes=None, **kwargs):
- self.ipam_method = ipam_method
- self.ipam_dns_method = ipam_dns_method
- if isinstance(ipam_dns_server, dict):
- obj = IpamDnsAddressType(**ipam_dns_server)
- self.ipam_dns_server = obj
- else:
- self.ipam_dns_server = ipam_dns_server
- if isinstance(dhcp_option_list, dict):
- obj = DhcpOptionsListType(**dhcp_option_list)
- self.dhcp_option_list = obj
- else:
- self.dhcp_option_list = dhcp_option_list
- if isinstance(cidr_block, dict):
- obj = SubnetType(**cidr_block)
- self.cidr_block = obj
- else:
- self.cidr_block = cidr_block
- if isinstance(host_routes, dict):
- obj = RouteTableType(**host_routes)
- self.host_routes = obj
- else:
- self.host_routes = host_routes
- def factory(*args_, **kwargs_):
- if IpamType.subclass:
- return IpamType.subclass(*args_, **kwargs_)
- else:
- return IpamType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_ipam_method(self): return self.ipam_method
- def set_ipam_method(self, ipam_method): self.ipam_method = ipam_method
- def validate_IpamMethodType(self, value):
- # Validate type IpamMethodType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'dhcp', u'fixed'])
- else:
- error = value not in [u'dhcp', u'fixed']
- if error:
- raise ValueError("IpamMethodType must be one of [u'dhcp', u'fixed']")
- def get_ipam_dns_method(self): return self.ipam_dns_method
- def set_ipam_dns_method(self, ipam_dns_method): self.ipam_dns_method = ipam_dns_method
- def validate_IpamDnsMethodType(self, value):
- # Validate type IpamDnsMethodType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'none', u'default-dns-server', u'tenant-dns-server', u'virtual-dns-server'])
- else:
- error = value not in [u'none', u'default-dns-server', u'tenant-dns-server', u'virtual-dns-server']
- if error:
- raise ValueError("IpamDnsMethodType must be one of [u'none', u'default-dns-server', u'tenant-dns-server', u'virtual-dns-server']")
- def get_ipam_dns_server(self): return self.ipam_dns_server
- def set_ipam_dns_server(self, ipam_dns_server): self.ipam_dns_server = ipam_dns_server
- def get_dhcp_option_list(self): return self.dhcp_option_list
- def set_dhcp_option_list(self, dhcp_option_list): self.dhcp_option_list = dhcp_option_list
- def get_cidr_block(self): return self.cidr_block
- def set_cidr_block(self, cidr_block): self.cidr_block = cidr_block
- def get_host_routes(self): return self.host_routes
- def set_host_routes(self, host_routes): self.host_routes = host_routes
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.ipam_method == other.ipam_method and
- self.ipam_dns_method == other.ipam_dns_method and
- self.ipam_dns_server == other.ipam_dns_server and
- self.dhcp_option_list == other.dhcp_option_list and
- self.cidr_block == other.cidr_block and
- self.host_routes == other.host_routes)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_ipam_method (obj.populate_string ("ipam_method"))
- obj.set_ipam_dns_method (obj.populate_string ("ipam_dns_method"))
- obj.set_ipam_dns_server (IpamDnsAddressType.populate ())
- obj.set_dhcp_option_list (DhcpOptionsListType.populate ())
- obj.set_cidr_block (SubnetType.populate ())
- obj.set_host_routes (RouteTableType.populate ())
- return obj
- def export(self, outfile, level=1, namespace_='', name_='IpamType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='IpamType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IpamType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='IpamType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.ipam_method is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sipam-method>%s</%sipam-method>%s' % (namespace_, self.gds_format_string(quote_xml(self.ipam_method).encode(ExternalEncoding), input_name='ipam-method'), namespace_, eol_))
- if self.ipam_dns_method is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sipam-dns-method>%s</%sipam-dns-method>%s' % (namespace_, self.gds_format_string(quote_xml(self.ipam_dns_method).encode(ExternalEncoding), input_name='ipam-dns-method'), namespace_, eol_))
- if self.ipam_dns_server is not None:
- self.ipam_dns_server.export(outfile, level, namespace_, name_='ipam-dns-server', pretty_print=pretty_print)
- if self.dhcp_option_list is not None:
- self.dhcp_option_list.export(outfile, level, namespace_, name_='dhcp-option-list', pretty_print=pretty_print)
- if self.cidr_block is not None:
- self.cidr_block.export(outfile, level, namespace_, name_='cidr-block', pretty_print=pretty_print)
- if self.host_routes is not None:
- self.host_routes.export(outfile, level, namespace_, name_='host-routes', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.ipam_method is not None or
- self.ipam_dns_method is not None or
- self.ipam_dns_server is not None or
- self.dhcp_option_list is not None or
- self.cidr_block is not None or
- self.host_routes is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='IpamType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.ipam_method is not None:
- showIndent(outfile, level)
- outfile.write('ipam_method=%s,\n' % quote_python(self.ipam_method).encode(ExternalEncoding))
- if self.ipam_dns_method is not None:
- showIndent(outfile, level)
- outfile.write('ipam_dns_method=%s,\n' % quote_python(self.ipam_dns_method).encode(ExternalEncoding))
- if self.ipam_dns_server is not None:
- showIndent(outfile, level)
- outfile.write('ipam_dns_server=model_.IpamDnsAddressType(\n')
- self.ipam_dns_server.exportLiteral(outfile, level, name_='ipam_dns_server')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.dhcp_option_list is not None:
- showIndent(outfile, level)
- outfile.write('dhcp_option_list=model_.DhcpOptionsListType(\n')
- self.dhcp_option_list.exportLiteral(outfile, level, name_='dhcp_option_list')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.cidr_block is not None:
- showIndent(outfile, level)
- outfile.write('cidr_block=model_.SubnetType(\n')
- self.cidr_block.exportLiteral(outfile, level, name_='cidr_block')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.host_routes is not None:
- showIndent(outfile, level)
- outfile.write('host_routes=model_.RouteTableType(\n')
- self.host_routes.exportLiteral(outfile, level, name_='host_routes')
- showIndent(outfile, level)
- outfile.write('),\n')
- def exportDict(self, name_='IpamType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'ipam-method':
- ipam_method_ = child_.text
- ipam_method_ = self.gds_validate_string(ipam_method_, node, 'ipam_method')
- self.ipam_method = ipam_method_
- self.validate_IpamMethodType(self.ipam_method) # validate type IpamMethodType
- elif nodeName_ == 'ipam-dns-method':
- ipam_dns_method_ = child_.text
- ipam_dns_method_ = self.gds_validate_string(ipam_dns_method_, node, 'ipam_dns_method')
- self.ipam_dns_method = ipam_dns_method_
- self.validate_IpamDnsMethodType(self.ipam_dns_method) # validate type IpamDnsMethodType
- elif nodeName_ == 'ipam-dns-server':
- obj_ = IpamDnsAddressType.factory()
- obj_.build(child_)
- self.set_ipam_dns_server(obj_)
- elif nodeName_ == 'dhcp-option-list':
- obj_ = DhcpOptionsListType.factory()
- obj_.build(child_)
- self.set_dhcp_option_list(obj_)
- elif nodeName_ == 'cidr-block':
- obj_ = SubnetType.factory()
- obj_.build(child_)
- self.set_cidr_block(obj_)
- elif nodeName_ == 'host-routes':
- obj_ = RouteTableType.factory()
- obj_.build(child_)
- self.set_host_routes(obj_)
-# end class IpamType
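Editor's sketch, not part of the removed file: IpamType's __init__ above accepts either ready objects or plain dicts for its compound members, and the enum validators raise ValueError for values outside the schema restriction. The virtual_dns_server_name kwarg for the nested dict is an assumption inferred from IpamDnsAddressType's attributes; the code assumes the generated module's Python 2 namespace.

# Sketch only -- runs inside the generated module's (Python 2) namespace.
ipam = IpamType(
    ipam_method='dhcp',
    ipam_dns_method='virtual-dns-server',
    ipam_dns_server={'virtual_dns_server_name': 'default-domain:vdns1'})  # dict -> IpamDnsAddressType
ipam.validate_IpamMethodType(ipam.get_ipam_method())          # 'dhcp' is allowed
ipam.validate_IpamDnsMethodType(ipam.get_ipam_dns_method())   # 'virtual-dns-server' is allowed
# ipam.validate_IpamMethodType('static') would raise ValueError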
-
-
-class EncapsulationPrioritiesType(GeneratedsSuper):
- """
- EncapsulationPrioritiesType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, encapsulation=None, **kwargs):
- if (encapsulation is None) or (encapsulation == []):
- self.encapsulation = []
- else:
- self.encapsulation = encapsulation
- def factory(*args_, **kwargs_):
- if EncapsulationPrioritiesType.subclass:
- return EncapsulationPrioritiesType.subclass(*args_, **kwargs_)
- else:
- return EncapsulationPrioritiesType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_encapsulation(self): return self.encapsulation
- def set_encapsulation(self, encapsulation): self.encapsulation = encapsulation
- def add_encapsulation(self, value): self.encapsulation.append(value)
- def insert_encapsulation(self, index, value): self.encapsulation[index] = value
- def delete_encapsulation(self, value): self.encapsulation.remove(value)
- def validate_EncapsulationType(self, value):
- # Validate type EncapsulationType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'MPLSoGRE', u'MPLSoUDP', u'VXLAN'])
- else:
- error = value not in [u'MPLSoGRE', u'MPLSoUDP', u'VXLAN']
- if error:
- raise ValueError("EncapsulationType must be one of [u'MPLSoGRE', u'MPLSoUDP', u'VXLAN']")
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.encapsulation == other.encapsulation)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_encapsulation ([obj.populate_string ("encapsulation")])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='EncapsulationPrioritiesType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='EncapsulationPrioritiesType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EncapsulationPrioritiesType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='EncapsulationPrioritiesType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for encapsulation_ in self.encapsulation:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sencapsulation>%s</%sencapsulation>%s' % (namespace_, self.gds_format_string(quote_xml(encapsulation_).encode(ExternalEncoding), input_name='encapsulation'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.encapsulation
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='EncapsulationPrioritiesType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('encapsulation=[\n')
- level += 1
- for encapsulation_ in self.encapsulation:
- showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(encapsulation_).encode(ExternalEncoding))
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='EncapsulationPrioritiesType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'encapsulation':
- encapsulation_ = child_.text
- encapsulation_ = self.gds_validate_string(encapsulation_, node, 'encapsulation')
- self.encapsulation.append(encapsulation_)
- self.validate_EncapsulationType(self.encapsulation) # validate type EncapsulationType
-# end class EncapsulationPrioritiesType
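Editor's sketch, not part of the removed file: the list-valued encapsulation field and its validator are both defined above; the validator accepts either a single value or a list and raises ValueError for anything outside the enumeration.

# Sketch only.
prio = EncapsulationPrioritiesType(encapsulation=['MPLSoUDP', 'VXLAN'])
prio.add_encapsulation('MPLSoGRE')
prio.validate_EncapsulationType(prio.get_encapsulation())   # list form: empty set difference, no error
try:
    prio.validate_EncapsulationType('GENEVE')               # outside the xsd restriction
except ValueError as e:
    print(e)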
-
-
-class LinklocalServiceEntryType(GeneratedsSuper):
- """
- LinklocalServiceEntryType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, linklocal_service_name=None, linklocal_service_ip=None, linklocal_service_port=None, ip_fabric_DNS_service_name=None, ip_fabric_service_port=None, ip_fabric_service_ip=None, **kwargs):
- self.linklocal_service_name = linklocal_service_name
- self.linklocal_service_ip = linklocal_service_ip
- self.linklocal_service_port = linklocal_service_port
- self.ip_fabric_DNS_service_name = ip_fabric_DNS_service_name
- self.ip_fabric_service_port = ip_fabric_service_port
- if (ip_fabric_service_ip is None) or (ip_fabric_service_ip == []):
- self.ip_fabric_service_ip = []
- else:
- self.ip_fabric_service_ip = ip_fabric_service_ip
- def factory(*args_, **kwargs_):
- if LinklocalServiceEntryType.subclass:
- return LinklocalServiceEntryType.subclass(*args_, **kwargs_)
- else:
- return LinklocalServiceEntryType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_linklocal_service_name(self): return self.linklocal_service_name
- def set_linklocal_service_name(self, linklocal_service_name): self.linklocal_service_name = linklocal_service_name
- def get_linklocal_service_ip(self): return self.linklocal_service_ip
- def set_linklocal_service_ip(self, linklocal_service_ip): self.linklocal_service_ip = linklocal_service_ip
- def validate_IpAddress(self, value):
- # Validate type IpAddress, a restriction on xsd:string.
- pass
- def get_linklocal_service_port(self): return self.linklocal_service_port
- def set_linklocal_service_port(self, linklocal_service_port): self.linklocal_service_port = linklocal_service_port
- def get_ip_fabric_DNS_service_name(self): return self.ip_fabric_DNS_service_name
- def set_ip_fabric_DNS_service_name(self, ip_fabric_DNS_service_name): self.ip_fabric_DNS_service_name = ip_fabric_DNS_service_name
- def get_ip_fabric_service_port(self): return self.ip_fabric_service_port
- def set_ip_fabric_service_port(self, ip_fabric_service_port): self.ip_fabric_service_port = ip_fabric_service_port
- def get_ip_fabric_service_ip(self): return self.ip_fabric_service_ip
- def set_ip_fabric_service_ip(self, ip_fabric_service_ip): self.ip_fabric_service_ip = ip_fabric_service_ip
- def add_ip_fabric_service_ip(self, value): self.ip_fabric_service_ip.append(value)
- def insert_ip_fabric_service_ip(self, index, value): self.ip_fabric_service_ip[index] = value
- def delete_ip_fabric_service_ip(self, value): self.ip_fabric_service_ip.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.linklocal_service_name == other.linklocal_service_name and
- self.linklocal_service_ip == other.linklocal_service_ip and
- self.linklocal_service_port == other.linklocal_service_port and
- self.ip_fabric_DNS_service_name == other.ip_fabric_DNS_service_name and
- self.ip_fabric_service_port == other.ip_fabric_service_port and
- self.ip_fabric_service_ip == other.ip_fabric_service_ip)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_linklocal_service_name (obj.populate_string ("linklocal_service_name"))
- obj.set_linklocal_service_ip (obj.populate_string ("linklocal_service_ip"))
- obj.set_linklocal_service_port (obj.populate_integer ("linklocal_service_port"))
- obj.set_ip_fabric_DNS_service_name (obj.populate_string ("ip_fabric_DNS_service_name"))
- obj.set_ip_fabric_service_port (obj.populate_integer ("ip_fabric_service_port"))
- obj.set_ip_fabric_service_ip ([obj.populate_string ("ip_fabric_service_ip")])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='LinklocalServiceEntryType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='LinklocalServiceEntryType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='LinklocalServiceEntryType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='LinklocalServiceEntryType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.linklocal_service_name is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%slinklocal-service-name>%s</%slinklocal-service-name>%s' % (namespace_, self.gds_format_string(quote_xml(self.linklocal_service_name).encode(ExternalEncoding), input_name='linklocal-service-name'), namespace_, eol_))
- if self.linklocal_service_ip is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%slinklocal-service-ip>%s</%slinklocal-service-ip>%s' % (namespace_, self.gds_format_string(quote_xml(self.linklocal_service_ip).encode(ExternalEncoding), input_name='linklocal-service-ip'), namespace_, eol_))
- if self.linklocal_service_port is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%slinklocal-service-port>%s</%slinklocal-service-port>%s' % (namespace_, self.gds_format_integer(self.linklocal_service_port, input_name='linklocal-service-port'), namespace_, eol_))
- if self.ip_fabric_DNS_service_name is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sip-fabric-DNS-service-name>%s</%sip-fabric-DNS-service-name>%s' % (namespace_, self.gds_format_string(quote_xml(self.ip_fabric_DNS_service_name).encode(ExternalEncoding), input_name='ip-fabric-DNS-service-name'), namespace_, eol_))
- if self.ip_fabric_service_port is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sip-fabric-service-port>%s</%sip-fabric-service-port>%s' % (namespace_, self.gds_format_integer(self.ip_fabric_service_port, input_name='ip-fabric-service-port'), namespace_, eol_))
- for ip_fabric_service_ip_ in self.ip_fabric_service_ip:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sip-fabric-service-ip>%s</%sip-fabric-service-ip>%s' % (namespace_, self.gds_format_string(quote_xml(ip_fabric_service_ip_).encode(ExternalEncoding), input_name='ip-fabric-service-ip'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.linklocal_service_name is not None or
- self.linklocal_service_ip is not None or
- self.linklocal_service_port is not None or
- self.ip_fabric_DNS_service_name is not None or
- self.ip_fabric_service_port is not None or
- self.ip_fabric_service_ip
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='LinklocalServiceEntryType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.linklocal_service_name is not None:
- showIndent(outfile, level)
- outfile.write('linklocal_service_name=%s,\n' % quote_python(self.linklocal_service_name).encode(ExternalEncoding))
- if self.linklocal_service_ip is not None:
- showIndent(outfile, level)
- outfile.write('linklocal_service_ip=%s,\n' % quote_python(self.linklocal_service_ip).encode(ExternalEncoding))
- if self.linklocal_service_port is not None:
- showIndent(outfile, level)
- outfile.write('linklocal_service_port=%d,\n' % self.linklocal_service_port)
- if self.ip_fabric_DNS_service_name is not None:
- showIndent(outfile, level)
- outfile.write('ip_fabric_DNS_service_name=%s,\n' % quote_python(self.ip_fabric_DNS_service_name).encode(ExternalEncoding))
- if self.ip_fabric_service_port is not None:
- showIndent(outfile, level)
- outfile.write('ip_fabric_service_port=%d,\n' % self.ip_fabric_service_port)
- showIndent(outfile, level)
- outfile.write('ip_fabric_service_ip=[\n')
- level += 1
- for ip_fabric_service_ip_ in self.ip_fabric_service_ip:
- showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(ip_fabric_service_ip_).encode(ExternalEncoding))
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='LinklocalServiceEntryType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'linklocal-service-name':
- linklocal_service_name_ = child_.text
- linklocal_service_name_ = self.gds_validate_string(linklocal_service_name_, node, 'linklocal_service_name')
- self.linklocal_service_name = linklocal_service_name_
- elif nodeName_ == 'linklocal-service-ip':
- linklocal_service_ip_ = child_.text
- linklocal_service_ip_ = self.gds_validate_string(linklocal_service_ip_, node, 'linklocal_service_ip')
- self.linklocal_service_ip = linklocal_service_ip_
- self.validate_IpAddress(self.linklocal_service_ip) # validate type IpAddress
- elif nodeName_ == 'linklocal-service-port':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'linklocal_service_port')
- self.linklocal_service_port = ival_
- elif nodeName_ == 'ip-fabric-DNS-service-name':
- ip_fabric_DNS_service_name_ = child_.text
- ip_fabric_DNS_service_name_ = self.gds_validate_string(ip_fabric_DNS_service_name_, node, 'ip_fabric_DNS_service_name')
- self.ip_fabric_DNS_service_name = ip_fabric_DNS_service_name_
- elif nodeName_ == 'ip-fabric-service-port':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'ip_fabric_service_port')
- self.ip_fabric_service_port = ival_
- elif nodeName_ == 'ip-fabric-service-ip':
- ip_fabric_service_ip_ = child_.text
- ip_fabric_service_ip_ = self.gds_validate_string(ip_fabric_service_ip_, node, 'ip_fabric_service_ip')
- self.ip_fabric_service_ip.append(ip_fabric_service_ip_)
- self.validate_IpAddress(self.ip_fabric_service_ip) # validate type IpAddress
-# end class LinklocalServiceEntryType
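Editor's sketch, not part of the removed file: scalar fields go through the constructor, while the repeated ip-fabric-service-ip element is managed with the add/insert/delete helpers defined above. Values are illustrative.

# Sketch only.
entry = LinklocalServiceEntryType(
    linklocal_service_name='metadata',
    linklocal_service_ip='169.254.169.254',
    linklocal_service_port=80,
    ip_fabric_service_port=8775)
entry.add_ip_fabric_service_ip('10.0.0.2')
entry.add_ip_fabric_service_ip('10.0.0.3')
print(entry.get_ip_fabric_service_ip())    # ['10.0.0.2', '10.0.0.3']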
-
-
-class LinklocalServicesTypes(GeneratedsSuper):
- """
- LinklocalServicesTypes class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, linklocal_service_entry=None, **kwargs):
- if (linklocal_service_entry is None) or (linklocal_service_entry == []):
- self.linklocal_service_entry = []
- else:
- if isinstance(linklocal_service_entry[0], dict):
- objs = [LinklocalServiceEntryType(**elem) for elem in linklocal_service_entry]
- self.linklocal_service_entry = objs
- else:
- self.linklocal_service_entry = linklocal_service_entry
- def factory(*args_, **kwargs_):
- if LinklocalServicesTypes.subclass:
- return LinklocalServicesTypes.subclass(*args_, **kwargs_)
- else:
- return LinklocalServicesTypes(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_linklocal_service_entry(self): return self.linklocal_service_entry
- def set_linklocal_service_entry(self, linklocal_service_entry): self.linklocal_service_entry = linklocal_service_entry
- def add_linklocal_service_entry(self, value): self.linklocal_service_entry.append(value)
- def insert_linklocal_service_entry(self, index, value): self.linklocal_service_entry[index] = value
- def delete_linklocal_service_entry(self, value): self.linklocal_service_entry.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.linklocal_service_entry == other.linklocal_service_entry)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_linklocal_service_entry ([LinklocalServiceEntryType.populate ()])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='LinklocalServicesTypes', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='LinklocalServicesTypes')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='LinklocalServicesTypes'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='LinklocalServicesTypes', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for linklocal_service_entry_ in self.linklocal_service_entry:
- if isinstance(linklocal_service_entry_, dict):
- linklocal_service_entry_ = LinklocalServiceEntryType(**linklocal_service_entry_)
- linklocal_service_entry_.export(outfile, level, namespace_, name_='linklocal-service-entry', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.linklocal_service_entry
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='LinklocalServicesTypes'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('linklocal_service_entry=[\n')
- level += 1
- for linklocal_service_entry_ in self.linklocal_service_entry:
- showIndent(outfile, level)
- outfile.write('model_.LinklocalServiceEntryType(\n')
- linklocal_service_entry_.exportLiteral(outfile, level, name_='LinklocalServiceEntryType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='LinklocalServicesTypes'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'linklocal-service-entry':
- obj_ = LinklocalServiceEntryType.factory()
- obj_.build(child_)
- self.linklocal_service_entry.append(obj_)
-# end class LinklocalServicesTypes
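Editor's sketch, not part of the removed file: the container promotes dict elements to LinklocalServiceEntryType in __init__, and entries can also be appended as objects afterwards.

# Sketch only.
services = LinklocalServicesTypes(linklocal_service_entry=[
    {'linklocal_service_name': 'metadata',
     'linklocal_service_ip': '169.254.169.254',
     'linklocal_service_port': 80}])
services.add_linklocal_service_entry(
    LinklocalServiceEntryType(linklocal_service_name='dns', linklocal_service_port=53))
print(len(services.get_linklocal_service_entry()))   # 2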
-
-
-class VirtualDnsType(GeneratedsSuper):
- """
- VirtualDnsType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, domain_name=None, dynamic_records_from_client=None, record_order=None, default_ttl_seconds=None, next_virtual_DNS=None, floating_ip_record=None, **kwargs):
- self.domain_name = domain_name
- self.dynamic_records_from_client = dynamic_records_from_client
- self.record_order = record_order
- self.default_ttl_seconds = default_ttl_seconds
- self.next_virtual_DNS = next_virtual_DNS
- self.floating_ip_record = floating_ip_record
- def factory(*args_, **kwargs_):
- if VirtualDnsType.subclass:
- return VirtualDnsType.subclass(*args_, **kwargs_)
- else:
- return VirtualDnsType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_domain_name(self): return self.domain_name
- def set_domain_name(self, domain_name): self.domain_name = domain_name
- def get_dynamic_records_from_client(self): return self.dynamic_records_from_client
- def set_dynamic_records_from_client(self, dynamic_records_from_client): self.dynamic_records_from_client = dynamic_records_from_client
- def get_record_order(self): return self.record_order
- def set_record_order(self, record_order): self.record_order = record_order
- def validate_DnsRecordOrderType(self, value):
- # Validate type DnsRecordOrderType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'fixed', u'random', u'round-robin'])
- else:
- error = value not in [u'fixed', u'random', u'round-robin']
- if error:
- raise ValueError("DnsRecordOrderType must be one of [u'fixed', u'random', u'round-robin']")
- def get_default_ttl_seconds(self): return self.default_ttl_seconds
- def set_default_ttl_seconds(self, default_ttl_seconds): self.default_ttl_seconds = default_ttl_seconds
- def get_next_virtual_DNS(self): return self.next_virtual_DNS
- def set_next_virtual_DNS(self, next_virtual_DNS): self.next_virtual_DNS = next_virtual_DNS
- def get_floating_ip_record(self): return self.floating_ip_record
- def set_floating_ip_record(self, floating_ip_record): self.floating_ip_record = floating_ip_record
- def validate_FloatingIpDnsNotation(self, value):
- # Validate type FloatingIpDnsNotation, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'dashed-ip', u'dashed-ip-tenant-name', u'vm-name', u'vm-name-tenant-name'])
- else:
- error = value not in [u'dashed-ip', u'dashed-ip-tenant-name', u'vm-name', u'vm-name-tenant-name']
- if error:
- raise ValueError("FloatingIpDnsNotation must be one of [u'dashed-ip', u'dashed-ip-tenant-name', u'vm-name', u'vm-name-tenant-name']")
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.domain_name == other.domain_name and
- self.dynamic_records_from_client == other.dynamic_records_from_client and
- self.record_order == other.record_order and
- self.default_ttl_seconds == other.default_ttl_seconds and
- self.next_virtual_DNS == other.next_virtual_DNS and
- self.floating_ip_record == other.floating_ip_record)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_domain_name (obj.populate_string ("domain_name"))
- obj.set_dynamic_records_from_client (obj.populate_boolean ("dynamic_records_from_client"))
- obj.set_record_order (obj.populate_string ("record_order"))
- obj.set_default_ttl_seconds (obj.populate_integer ("default_ttl_seconds"))
- obj.set_next_virtual_DNS (obj.populate_string ("next_virtual_DNS"))
- obj.set_floating_ip_record (obj.populate_string ("floating_ip_record"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='VirtualDnsType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='VirtualDnsType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VirtualDnsType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='VirtualDnsType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.domain_name is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sdomain-name>%s</%sdomain-name>%s' % (namespace_, self.gds_format_string(quote_xml(self.domain_name).encode(ExternalEncoding), input_name='domain-name'), namespace_, eol_))
- if self.dynamic_records_from_client is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sdynamic-records-from-client>%s</%sdynamic-records-from-client>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.dynamic_records_from_client)), input_name='dynamic-records-from-client'), namespace_, eol_))
- if self.record_order is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%srecord-order>%s</%srecord-order>%s' % (namespace_, self.gds_format_string(quote_xml(self.record_order).encode(ExternalEncoding), input_name='record-order'), namespace_, eol_))
- if self.default_ttl_seconds is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sdefault-ttl-seconds>%s</%sdefault-ttl-seconds>%s' % (namespace_, self.gds_format_integer(self.default_ttl_seconds, input_name='default-ttl-seconds'), namespace_, eol_))
- if self.next_virtual_DNS is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%snext-virtual-DNS>%s</%snext-virtual-DNS>%s' % (namespace_, self.gds_format_string(quote_xml(self.next_virtual_DNS).encode(ExternalEncoding), input_name='next-virtual-DNS'), namespace_, eol_))
- if self.floating_ip_record is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sfloating-ip-record>%s</%sfloating-ip-record>%s' % (namespace_, self.gds_format_string(quote_xml(self.floating_ip_record).encode(ExternalEncoding), input_name='floating-ip-record'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.domain_name is not None or
- self.dynamic_records_from_client is not None or
- self.record_order is not None or
- self.default_ttl_seconds is not None or
- self.next_virtual_DNS is not None or
- self.floating_ip_record is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='VirtualDnsType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.domain_name is not None:
- showIndent(outfile, level)
- outfile.write('domain_name=%s,\n' % quote_python(self.domain_name).encode(ExternalEncoding))
- if self.dynamic_records_from_client is not None:
- showIndent(outfile, level)
- outfile.write('dynamic_records_from_client=%s,\n' % self.dynamic_records_from_client)
- if self.record_order is not None:
- showIndent(outfile, level)
- outfile.write('record_order=%s,\n' % quote_python(self.record_order).encode(ExternalEncoding))
- if self.default_ttl_seconds is not None:
- showIndent(outfile, level)
- outfile.write('default_ttl_seconds=%d,\n' % self.default_ttl_seconds)
- if self.next_virtual_DNS is not None:
- showIndent(outfile, level)
- outfile.write('next_virtual_DNS=%s,\n' % quote_python(self.next_virtual_DNS).encode(ExternalEncoding))
- if self.floating_ip_record is not None:
- showIndent(outfile, level)
- outfile.write('floating_ip_record=%s,\n' % quote_python(self.floating_ip_record).encode(ExternalEncoding))
- def exportDict(self, name_='VirtualDnsType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'domain-name':
- domain_name_ = child_.text
- domain_name_ = self.gds_validate_string(domain_name_, node, 'domain_name')
- self.domain_name = domain_name_
- elif nodeName_ == 'dynamic-records-from-client':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'dynamic_records_from_client')
- self.dynamic_records_from_client = ival_
- elif nodeName_ == 'record-order':
- record_order_ = child_.text
- record_order_ = self.gds_validate_string(record_order_, node, 'record_order')
- self.record_order = record_order_
- self.validate_DnsRecordOrderType(self.record_order) # validate type DnsRecordOrderType
- elif nodeName_ == 'default-ttl-seconds':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'default_ttl_seconds')
- self.default_ttl_seconds = ival_
- elif nodeName_ == 'next-virtual-DNS':
- next_virtual_DNS_ = child_.text
- next_virtual_DNS_ = self.gds_validate_string(next_virtual_DNS_, node, 'next_virtual_DNS')
- self.next_virtual_DNS = next_virtual_DNS_
- elif nodeName_ == 'floating-ip-record':
- floating_ip_record_ = child_.text
- floating_ip_record_ = self.gds_validate_string(floating_ip_record_, node, 'floating_ip_record')
- self.floating_ip_record = floating_ip_record_
- self.validate_FloatingIpDnsNotation(self.floating_ip_record) # validate type FloatingIpDnsNotation
-# end class VirtualDnsType
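Editor's sketch, not part of the removed file: record_order and floating_ip_record are restricted strings; the validators above raise ValueError for anything outside their enumerations. Values are illustrative.

# Sketch only.
vdns = VirtualDnsType(
    domain_name='example.test',
    dynamic_records_from_client=True,
    record_order='round-robin',
    default_ttl_seconds=86400,
    floating_ip_record='dashed-ip')
vdns.validate_DnsRecordOrderType(vdns.get_record_order())            # allowed
vdns.validate_FloatingIpDnsNotation(vdns.get_floating_ip_record())   # allowed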
-
-
-class VirtualDnsRecordType(GeneratedsSuper):
- """
- VirtualDnsRecordType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, record_name=None, record_type=None, record_class=None, record_data=None, record_ttl_seconds=None, record_mx_preference=None, **kwargs):
- self.record_name = record_name
- self.record_type = record_type
- self.record_class = record_class
- self.record_data = record_data
- self.record_ttl_seconds = record_ttl_seconds
- self.record_mx_preference = record_mx_preference
- def factory(*args_, **kwargs_):
- if VirtualDnsRecordType.subclass:
- return VirtualDnsRecordType.subclass(*args_, **kwargs_)
- else:
- return VirtualDnsRecordType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_record_name(self): return self.record_name
- def set_record_name(self, record_name): self.record_name = record_name
- def get_record_type(self): return self.record_type
- def set_record_type(self, record_type): self.record_type = record_type
- def validate_DnsRecordTypeType(self, value):
- # Validate type DnsRecordTypeType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'A', u'AAAA', u'CNAME', u'PTR', u'NS', u'MX'])
- else:
- error = value not in [u'A', u'AAAA', u'CNAME', u'PTR', u'NS', u'MX']
- if error:
- raise ValueError("DnsRecordTypeType must be one of [u'A', u'AAAA', u'CNAME', u'PTR', u'NS', u'MX']")
- def get_record_class(self): return self.record_class
- def set_record_class(self, record_class): self.record_class = record_class
- def validate_DnsRecordClassType(self, value):
- # Validate type DnsRecordClassType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'IN'])
- else:
- error = value not in [u'IN']
- if error:
- raise ValueError("DnsRecordClassType must be one of [u'IN']")
- def get_record_data(self): return self.record_data
- def set_record_data(self, record_data): self.record_data = record_data
- def get_record_ttl_seconds(self): return self.record_ttl_seconds
- def set_record_ttl_seconds(self, record_ttl_seconds): self.record_ttl_seconds = record_ttl_seconds
- def get_record_mx_preference(self): return self.record_mx_preference
- def set_record_mx_preference(self, record_mx_preference): self.record_mx_preference = record_mx_preference
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.record_name == other.record_name and
- self.record_type == other.record_type and
- self.record_class == other.record_class and
- self.record_data == other.record_data and
- self.record_ttl_seconds == other.record_ttl_seconds and
- self.record_mx_preference == other.record_mx_preference)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_record_name (obj.populate_string ("record_name"))
- obj.set_record_type (obj.populate_string ("record_type"))
- obj.set_record_class (obj.populate_string ("record_class"))
- obj.set_record_data (obj.populate_string ("record_data"))
- obj.set_record_ttl_seconds (obj.populate_integer ("record_ttl_seconds"))
- obj.set_record_mx_preference (obj.populate_integer ("record_mx_preference"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='VirtualDnsRecordType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='VirtualDnsRecordType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VirtualDnsRecordType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='VirtualDnsRecordType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.record_name is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%srecord-name>%s</%srecord-name>%s' % (namespace_, self.gds_format_string(quote_xml(self.record_name).encode(ExternalEncoding), input_name='record-name'), namespace_, eol_))
- if self.record_type is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%srecord-type>%s</%srecord-type>%s' % (namespace_, self.gds_format_string(quote_xml(self.record_type).encode(ExternalEncoding), input_name='record-type'), namespace_, eol_))
- if self.record_class is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%srecord-class>%s</%srecord-class>%s' % (namespace_, self.gds_format_string(quote_xml(self.record_class).encode(ExternalEncoding), input_name='record-class'), namespace_, eol_))
- if self.record_data is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%srecord-data>%s</%srecord-data>%s' % (namespace_, self.gds_format_string(quote_xml(self.record_data).encode(ExternalEncoding), input_name='record-data'), namespace_, eol_))
- if self.record_ttl_seconds is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%srecord-ttl-seconds>%s</%srecord-ttl-seconds>%s' % (namespace_, self.gds_format_integer(self.record_ttl_seconds, input_name='record-ttl-seconds'), namespace_, eol_))
- if self.record_mx_preference is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%srecord-mx-preference>%s</%srecord-mx-preference>%s' % (namespace_, self.gds_format_integer(self.record_mx_preference, input_name='record-mx-preference'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.record_name is not None or
- self.record_type is not None or
- self.record_class is not None or
- self.record_data is not None or
- self.record_ttl_seconds is not None or
- self.record_mx_preference is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='VirtualDnsRecordType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.record_name is not None:
- showIndent(outfile, level)
- outfile.write('record_name=%s,\n' % quote_python(self.record_name).encode(ExternalEncoding))
- if self.record_type is not None:
- showIndent(outfile, level)
- outfile.write('record_type=%s,\n' % quote_python(self.record_type).encode(ExternalEncoding))
- if self.record_class is not None:
- showIndent(outfile, level)
- outfile.write('record_class=%s,\n' % quote_python(self.record_class).encode(ExternalEncoding))
- if self.record_data is not None:
- showIndent(outfile, level)
- outfile.write('record_data=%s,\n' % quote_python(self.record_data).encode(ExternalEncoding))
- if self.record_ttl_seconds is not None:
- showIndent(outfile, level)
- outfile.write('record_ttl_seconds=%d,\n' % self.record_ttl_seconds)
- if self.record_mx_preference is not None:
- showIndent(outfile, level)
- outfile.write('record_mx_preference=%d,\n' % self.record_mx_preference)
- def exportDict(self, name_='VirtualDnsRecordType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'record-name':
- record_name_ = child_.text
- record_name_ = self.gds_validate_string(record_name_, node, 'record_name')
- self.record_name = record_name_
- elif nodeName_ == 'record-type':
- record_type_ = child_.text
- record_type_ = self.gds_validate_string(record_type_, node, 'record_type')
- self.record_type = record_type_
- self.validate_DnsRecordTypeType(self.record_type) # validate type DnsRecordTypeType
- elif nodeName_ == 'record-class':
- record_class_ = child_.text
- record_class_ = self.gds_validate_string(record_class_, node, 'record_class')
- self.record_class = record_class_
- self.validate_DnsRecordClassType(self.record_class) # validate type DnsRecordClassType
- elif nodeName_ == 'record-data':
- record_data_ = child_.text
- record_data_ = self.gds_validate_string(record_data_, node, 'record_data')
- self.record_data = record_data_
- elif nodeName_ == 'record-ttl-seconds':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'record_ttl_seconds')
- self.record_ttl_seconds = ival_
- elif nodeName_ == 'record-mx-preference':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'record_mx_preference')
- self.record_mx_preference = ival_
-# end class VirtualDnsRecordType
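Editor's sketch, not part of the removed file: a record can also be built from parsed XML via build(); the child element names match buildChildren above, the wrapper element name is arbitrary because build() only walks children, and the module's Python 2 helpers (gds_validate_*, Tag_pattern_) are assumed to be in scope.

# Sketch only.
from xml.etree import ElementTree as ET

xml = ('<record>'
       '<record-name>host1</record-name>'
       '<record-type>A</record-type>'
       '<record-class>IN</record-class>'
       '<record-data>10.1.1.5</record-data>'
       '<record-ttl-seconds>300</record-ttl-seconds>'
       '</record>')
rec = VirtualDnsRecordType.factory()
rec.build(ET.fromstring(xml))
print(rec.get_record_type())          # 'A'
print(rec.get_record_ttl_seconds())   # 300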
-
-
-class FloatingIpPoolType(GeneratedsSuper):
- """
- FloatingIpPoolType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, subnet=None, **kwargs):
- if (subnet is None) or (subnet == []):
- self.subnet = []
- else:
- if isinstance(subnet[0], dict):
- objs = [SubnetType(**elem) for elem in subnet]
- self.subnet = objs
- else:
- self.subnet = subnet
- def factory(*args_, **kwargs_):
- if FloatingIpPoolType.subclass:
- return FloatingIpPoolType.subclass(*args_, **kwargs_)
- else:
- return FloatingIpPoolType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_subnet(self): return self.subnet
- def set_subnet(self, subnet): self.subnet = subnet
- def add_subnet(self, value): self.subnet.append(value)
- def insert_subnet(self, index, value): self.subnet[index] = value
- def delete_subnet(self, value): self.subnet.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.subnet == other.subnet)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_subnet ([SubnetType.populate ()])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='FloatingIpPoolType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='FloatingIpPoolType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='FloatingIpPoolType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='FloatingIpPoolType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for subnet_ in self.subnet:
- if isinstance(subnet_, dict):
- subnet_ = SubnetType(**subnet_)
- subnet_.export(outfile, level, namespace_, name_='subnet', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.subnet
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='FloatingIpPoolType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('subnet=[\n')
- level += 1
- for subnet_ in self.subnet:
- showIndent(outfile, level)
- outfile.write('model_.SubnetType(\n')
- subnet_.exportLiteral(outfile, level, name_='SubnetType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='FloatingIpPoolType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'subnet':
- obj_ = SubnetType.factory()
- obj_.build(child_)
- self.subnet.append(obj_)
-# end class FloatingIpPoolType
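Editor's sketch, not part of the removed file: the pool is just a list of SubnetType objects; populate() (used in the classmethod above) fills a subnet with generated dummy values, which keeps the example independent of SubnetType's field names.

# Sketch only.
pool = FloatingIpPoolType()
print(pool.hasContent_())          # False -- empty subnet list
pool.add_subnet(SubnetType.populate())
print(pool.hasContent_())          # True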
-
-
-class SubnetListType(GeneratedsSuper):
- """
- SubnetListType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, subnet=None, **kwargs):
- if (subnet is None) or (subnet == []):
- self.subnet = []
- else:
- if isinstance(subnet[0], dict):
- objs = [SubnetType(**elem) for elem in subnet]
- self.subnet = objs
- else:
- self.subnet = subnet
- def factory(*args_, **kwargs_):
- if SubnetListType.subclass:
- return SubnetListType.subclass(*args_, **kwargs_)
- else:
- return SubnetListType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_subnet(self): return self.subnet
- def set_subnet(self, subnet): self.subnet = subnet
- def add_subnet(self, value): self.subnet.append(value)
- def insert_subnet(self, index, value): self.subnet[index] = value
- def delete_subnet(self, value): self.subnet.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.subnet == other.subnet)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_subnet ([SubnetType.populate ()])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='SubnetListType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='SubnetListType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SubnetListType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='SubnetListType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for subnet_ in self.subnet:
- if isinstance(subnet_, dict):
- subnet_ = SubnetType(**subnet_)
- subnet_.export(outfile, level, namespace_, name_='subnet', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.subnet
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='SubnetListType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('subnet=[\n')
- level += 1
- for subnet_ in self.subnet:
- showIndent(outfile, level)
- outfile.write('model_.SubnetType(\n')
- subnet_.exportLiteral(outfile, level, name_='SubnetType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='SubnetListType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'subnet':
- obj_ = SubnetType.factory()
- obj_.build(child_)
- self.subnet.append(obj_)
-# end class SubnetListType
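Editor's sketch, not part of the removed file: SubnetListType mirrors FloatingIpPoolType; dict elements are promoted to SubnetType, and exportDict() round-trips the object through JSON into a plain dict (Python 2 iteritems). The ip_prefix/ip_prefix_len field names are an assumption, not shown in this hunk.

# Sketch only (SubnetType field names assumed).
subnets = SubnetListType(subnet=[{'ip_prefix': '10.0.0.0', 'ip_prefix_len': 24}])
print(subnets.exportDict())   # {'SubnetListType': {'subnet': [{...}]}}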
-
-
-class IpamSubnetType(GeneratedsSuper):
- """
- IpamSubnetType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, subnet=None, default_gateway=None, dns_server_address=None, subnet_uuid=None, enable_dhcp=True, dns_nameservers=None, allocation_pools=None, addr_from_start=None, dhcp_option_list=None, host_routes=None, subnet_name=None, **kwargs):
- if isinstance(subnet, dict):
- obj = SubnetType(**subnet)
- self.subnet = obj
- else:
- self.subnet = subnet
- self.default_gateway = default_gateway
- self.dns_server_address = dns_server_address
- self.subnet_uuid = subnet_uuid
- self.enable_dhcp = enable_dhcp
- if (dns_nameservers is None) or (dns_nameservers == []):
- self.dns_nameservers = []
- else:
- self.dns_nameservers = dns_nameservers
- if (allocation_pools is None) or (allocation_pools == []):
- self.allocation_pools = []
- else:
- if isinstance(allocation_pools[0], dict):
- objs = [AllocationPoolType(**elem) for elem in allocation_pools]
- self.allocation_pools = objs
- else:
- self.allocation_pools = allocation_pools
- self.addr_from_start = addr_from_start
- if isinstance(dhcp_option_list, dict):
- obj = DhcpOptionsListType(**dhcp_option_list)
- self.dhcp_option_list = obj
- else:
- self.dhcp_option_list = dhcp_option_list
- if isinstance(host_routes, dict):
- obj = RouteTableType(**host_routes)
- self.host_routes = obj
- else:
- self.host_routes = host_routes
- self.subnet_name = subnet_name
- def factory(*args_, **kwargs_):
- if IpamSubnetType.subclass:
- return IpamSubnetType.subclass(*args_, **kwargs_)
- else:
- return IpamSubnetType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_subnet(self): return self.subnet
- def set_subnet(self, subnet): self.subnet = subnet
- def get_default_gateway(self): return self.default_gateway
- def set_default_gateway(self, default_gateway): self.default_gateway = default_gateway
- def validate_IpAddressType(self, value):
- # Validate type IpAddressType, a restriction on xsd:string.
- pass
- def get_dns_server_address(self): return self.dns_server_address
- def set_dns_server_address(self, dns_server_address): self.dns_server_address = dns_server_address
- def get_subnet_uuid(self): return self.subnet_uuid
- def set_subnet_uuid(self, subnet_uuid): self.subnet_uuid = subnet_uuid
- def get_enable_dhcp(self): return self.enable_dhcp
- def set_enable_dhcp(self, enable_dhcp): self.enable_dhcp = enable_dhcp
- def get_dns_nameservers(self): return self.dns_nameservers
- def set_dns_nameservers(self, dns_nameservers): self.dns_nameservers = dns_nameservers
- def add_dns_nameservers(self, value): self.dns_nameservers.append(value)
- def insert_dns_nameservers(self, index, value): self.dns_nameservers[index] = value
- def delete_dns_nameservers(self, value): self.dns_nameservers.remove(value)
- def get_allocation_pools(self): return self.allocation_pools
- def set_allocation_pools(self, allocation_pools): self.allocation_pools = allocation_pools
- def add_allocation_pools(self, value): self.allocation_pools.append(value)
- def insert_allocation_pools(self, index, value): self.allocation_pools[index] = value
- def delete_allocation_pools(self, value): self.allocation_pools.remove(value)
- def get_addr_from_start(self): return self.addr_from_start
- def set_addr_from_start(self, addr_from_start): self.addr_from_start = addr_from_start
- def get_dhcp_option_list(self): return self.dhcp_option_list
- def set_dhcp_option_list(self, dhcp_option_list): self.dhcp_option_list = dhcp_option_list
- def get_host_routes(self): return self.host_routes
- def set_host_routes(self, host_routes): self.host_routes = host_routes
- def get_subnet_name(self): return self.subnet_name
- def set_subnet_name(self, subnet_name): self.subnet_name = subnet_name
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.subnet == other.subnet and
- self.default_gateway == other.default_gateway and
- self.dns_server_address == other.dns_server_address and
- self.subnet_uuid == other.subnet_uuid and
- self.enable_dhcp == other.enable_dhcp and
- self.dns_nameservers == other.dns_nameservers and
- self.allocation_pools == other.allocation_pools and
- self.addr_from_start == other.addr_from_start and
- self.dhcp_option_list == other.dhcp_option_list and
- self.host_routes == other.host_routes and
- self.subnet_name == other.subnet_name)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_subnet (SubnetType.populate ())
- obj.set_default_gateway (obj.populate_string ("default_gateway"))
- obj.set_dns_server_address (obj.populate_string ("dns_server_address"))
- obj.set_subnet_uuid (obj.populate_string ("subnet_uuid"))
- obj.set_enable_dhcp (obj.populate_boolean ("enable_dhcp"))
- obj.set_dns_nameservers ([obj.populate_string ("dns_nameservers")])
- obj.set_allocation_pools ([AllocationPoolType.populate ()])
- obj.set_addr_from_start (obj.populate_boolean ("addr_from_start"))
- obj.set_dhcp_option_list (DhcpOptionsListType.populate ())
- obj.set_host_routes (RouteTableType.populate ())
- obj.set_subnet_name (obj.populate_string ("subnet_name"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='IpamSubnetType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='IpamSubnetType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IpamSubnetType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='IpamSubnetType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.subnet is not None:
- self.subnet.export(outfile, level, namespace_, name_='subnet', pretty_print=pretty_print)
- if self.default_gateway is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sdefault-gateway>%s</%sdefault-gateway>%s' % (namespace_, self.gds_format_string(quote_xml(self.default_gateway).encode(ExternalEncoding), input_name='default-gateway'), namespace_, eol_))
- if self.dns_server_address is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sdns-server-address>%s</%sdns-server-address>%s' % (namespace_, self.gds_format_string(quote_xml(self.dns_server_address).encode(ExternalEncoding), input_name='dns-server-address'), namespace_, eol_))
- if self.subnet_uuid is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%ssubnet-uuid>%s</%ssubnet-uuid>%s' % (namespace_, self.gds_format_string(quote_xml(self.subnet_uuid).encode(ExternalEncoding), input_name='subnet-uuid'), namespace_, eol_))
- if self.enable_dhcp is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%senable-dhcp>%s</%senable-dhcp>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.enable_dhcp)), input_name='enable-dhcp'), namespace_, eol_))
- for dns_nameservers_ in self.dns_nameservers:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sdns-nameservers>%s</%sdns-nameservers>%s' % (namespace_, self.gds_format_string(quote_xml(dns_nameservers_).encode(ExternalEncoding), input_name='dns-nameservers'), namespace_, eol_))
- for allocation_pools_ in self.allocation_pools:
- if isinstance(allocation_pools_, dict):
- allocation_pools_ = AllocationPoolType(**allocation_pools_)
- allocation_pools_.export(outfile, level, namespace_, name_='allocation-pools', pretty_print=pretty_print)
- if self.addr_from_start is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%saddr_from_start>%s</%saddr_from_start>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.addr_from_start)), input_name='addr_from_start'), namespace_, eol_))
- if self.dhcp_option_list is not None:
- self.dhcp_option_list.export(outfile, level, namespace_, name_='dhcp-option-list', pretty_print=pretty_print)
- if self.host_routes is not None:
- self.host_routes.export(outfile, level, namespace_, name_='host-routes', pretty_print=pretty_print)
- if self.subnet_name is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%ssubnet-name>%s</%ssubnet-name>%s' % (namespace_, self.gds_format_string(quote_xml(self.subnet_name).encode(ExternalEncoding), input_name='subnet-name'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.subnet is not None or
- self.default_gateway is not None or
- self.dns_server_address is not None or
- self.subnet_uuid is not None or
- self.enable_dhcp is not None or
- self.dns_nameservers or
- self.allocation_pools or
- self.addr_from_start is not None or
- self.dhcp_option_list is not None or
- self.host_routes is not None or
- self.subnet_name is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='IpamSubnetType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.subnet is not None:
- showIndent(outfile, level)
- outfile.write('subnet=model_.SubnetType(\n')
- self.subnet.exportLiteral(outfile, level, name_='subnet')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.default_gateway is not None:
- showIndent(outfile, level)
- outfile.write('default_gateway=%s,\n' % quote_python(self.default_gateway).encode(ExternalEncoding))
- if self.dns_server_address is not None:
- showIndent(outfile, level)
- outfile.write('dns_server_address=%s,\n' % quote_python(self.dns_server_address).encode(ExternalEncoding))
- if self.subnet_uuid is not None:
- showIndent(outfile, level)
- outfile.write('subnet_uuid=%s,\n' % quote_python(self.subnet_uuid).encode(ExternalEncoding))
- if self.enable_dhcp is not None:
- showIndent(outfile, level)
- outfile.write('enable_dhcp=%s,\n' % self.enable_dhcp)
- showIndent(outfile, level)
- outfile.write('dns_nameservers=[\n')
- level += 1
- for dns_nameservers_ in self.dns_nameservers:
- showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(dns_nameservers_).encode(ExternalEncoding))
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('allocation_pools=[\n')
- level += 1
- for allocation_pools_ in self.allocation_pools:
- showIndent(outfile, level)
- outfile.write('model_.AllocationPoolType(\n')
- allocation_pools_.exportLiteral(outfile, level, name_='AllocationPoolType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- if self.addr_from_start is not None:
- showIndent(outfile, level)
- outfile.write('addr_from_start=%s,\n' % self.addr_from_start)
- if self.dhcp_option_list is not None:
- showIndent(outfile, level)
- outfile.write('dhcp_option_list=model_.DhcpOptionsListType(\n')
- self.dhcp_option_list.exportLiteral(outfile, level, name_='dhcp_option_list')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.host_routes is not None:
- showIndent(outfile, level)
- outfile.write('host_routes=model_.RouteTableType(\n')
- self.host_routes.exportLiteral(outfile, level, name_='host_routes')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.subnet_name is not None:
- showIndent(outfile, level)
- outfile.write('subnet_name=%s,\n' % quote_python(self.subnet_name).encode(ExternalEncoding))
- def exportDict(self, name_='IpamSubnetType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'subnet':
- obj_ = SubnetType.factory()
- obj_.build(child_)
- self.set_subnet(obj_)
- elif nodeName_ == 'default-gateway':
- default_gateway_ = child_.text
- default_gateway_ = self.gds_validate_string(default_gateway_, node, 'default_gateway')
- self.default_gateway = default_gateway_
- self.validate_IpAddressType(self.default_gateway) # validate type IpAddressType
- elif nodeName_ == 'dns-server-address':
- dns_server_address_ = child_.text
- dns_server_address_ = self.gds_validate_string(dns_server_address_, node, 'dns_server_address')
- self.dns_server_address = dns_server_address_
- self.validate_IpAddressType(self.dns_server_address) # validate type IpAddressType
- elif nodeName_ == 'subnet-uuid':
- subnet_uuid_ = child_.text
- subnet_uuid_ = self.gds_validate_string(subnet_uuid_, node, 'subnet_uuid')
- self.subnet_uuid = subnet_uuid_
- elif nodeName_ == 'enable-dhcp':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'enable_dhcp')
- self.enable_dhcp = ival_
- elif nodeName_ == 'dns-nameservers':
- dns_nameservers_ = child_.text
- dns_nameservers_ = self.gds_validate_string(dns_nameservers_, node, 'dns_nameservers')
- self.dns_nameservers.append(dns_nameservers_)
- elif nodeName_ == 'allocation-pools':
- obj_ = AllocationPoolType.factory()
- obj_.build(child_)
- self.allocation_pools.append(obj_)
- elif nodeName_ == 'addr_from_start':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'addr_from_start')
- self.addr_from_start = ival_
- elif nodeName_ == 'dhcp-option-list':
- obj_ = DhcpOptionsListType.factory()
- obj_.build(child_)
- self.set_dhcp_option_list(obj_)
- elif nodeName_ == 'host-routes':
- obj_ = RouteTableType.factory()
- obj_.build(child_)
- self.set_host_routes(obj_)
- elif nodeName_ == 'subnet-name':
- subnet_name_ = child_.text
- subnet_name_ = self.gds_validate_string(subnet_name_, node, 'subnet_name')
- self.subnet_name = subnet_name_
-# end class IpamSubnetType
-
-
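For reference while reviewing this removal, a minimal usage sketch of the IpamSubnetType binding defined above. The import path and the SubnetType field names (ip_prefix, ip_prefix_len) are assumptions based on upstream Contrail's generated bindings and are not shown in this diff; the dict/object duality relies on the coercion visible in __init__ above.

    # Minimal sketch (Python 2, matching the generated code above). The module
    # path is assumed; adjust it to wherever this generated file lived.
    import sys
    from vnc_api.gen.resource_xsd import IpamSubnetType, SubnetType

    # Nested types may be passed either as objects or as plain dicts; the
    # constructor shown above coerces dicts into the corresponding classes.
    ipam_subnet = IpamSubnetType(
        subnet=SubnetType(ip_prefix='10.1.1.0', ip_prefix_len=24),  # field names assumed
        default_gateway='10.1.1.1',
        enable_dhcp=True,
        dns_nameservers=['8.8.8.8'],
        subnet_name='demo-subnet')

    ipam_subnet.export(sys.stdout)     # emits <IpamSubnetType>...</IpamSubnetType> XML
    print(ipam_subnet.exportDict())    # JSON-friendly dict keyed by 'IpamSubnetType'
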
-class VnSubnetsType(GeneratedsSuper):
- """
- VnSubnetsType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, ipam_subnets=None, host_routes=None, **kwargs):
- if (ipam_subnets is None) or (ipam_subnets == []):
- self.ipam_subnets = []
- else:
- if isinstance(ipam_subnets[0], dict):
- objs = [IpamSubnetType(**elem) for elem in ipam_subnets]
- self.ipam_subnets = objs
- else:
- self.ipam_subnets = ipam_subnets
- if isinstance(host_routes, dict):
- obj = RouteTableType(**host_routes)
- self.host_routes = obj
- else:
- self.host_routes = host_routes
- def factory(*args_, **kwargs_):
- if VnSubnetsType.subclass:
- return VnSubnetsType.subclass(*args_, **kwargs_)
- else:
- return VnSubnetsType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_ipam_subnets(self): return self.ipam_subnets
- def set_ipam_subnets(self, ipam_subnets): self.ipam_subnets = ipam_subnets
- def add_ipam_subnets(self, value): self.ipam_subnets.append(value)
- def insert_ipam_subnets(self, index, value): self.ipam_subnets[index] = value
- def delete_ipam_subnets(self, value): self.ipam_subnets.remove(value)
- def get_host_routes(self): return self.host_routes
- def set_host_routes(self, host_routes): self.host_routes = host_routes
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.ipam_subnets == other.ipam_subnets and
- self.host_routes == other.host_routes)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_ipam_subnets ([IpamSubnetType.populate ()])
- obj.set_host_routes (RouteTableType.populate ())
- return obj
- def export(self, outfile, level=1, namespace_='', name_='VnSubnetsType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='VnSubnetsType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VnSubnetsType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='VnSubnetsType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for ipam_subnets_ in self.ipam_subnets:
- if isinstance(ipam_subnets_, dict):
- ipam_subnets_ = IpamSubnetType(**ipam_subnets_)
- ipam_subnets_.export(outfile, level, namespace_, name_='ipam-subnets', pretty_print=pretty_print)
- if self.host_routes is not None:
- self.host_routes.export(outfile, level, namespace_, name_='host-routes', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.ipam_subnets or
- self.host_routes is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='VnSubnetsType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('ipam_subnets=[\n')
- level += 1
- for ipam_subnets_ in self.ipam_subnets:
- showIndent(outfile, level)
- outfile.write('model_.IpamSubnetType(\n')
- ipam_subnets_.exportLiteral(outfile, level, name_='IpamSubnetType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- if self.host_routes is not None:
- showIndent(outfile, level)
- outfile.write('host_routes=model_.RouteTableType(\n')
- self.host_routes.exportLiteral(outfile, level, name_='host_routes')
- showIndent(outfile, level)
- outfile.write('),\n')
- def exportDict(self, name_='VnSubnetsType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'ipam-subnets':
- obj_ = IpamSubnetType.factory()
- obj_.build(child_)
- self.ipam_subnets.append(obj_)
- elif nodeName_ == 'host-routes':
- obj_ = RouteTableType.factory()
- obj_.build(child_)
- self.set_host_routes(obj_)
-# end class VnSubnetsType
-
-
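A similar sketch for VnSubnetsType, which wraps a list of IpamSubnetType entries. As in the __init__ above, plain dicts are coerced element-wise; object construction shown here reuses the assumed import path and SubnetType field names from the previous sketch.

    import sys
    from vnc_api.gen.resource_xsd import VnSubnetsType, IpamSubnetType, SubnetType

    vn_subnets = VnSubnetsType(ipam_subnets=[
        IpamSubnetType(subnet=SubnetType(ip_prefix='10.1.1.0', ip_prefix_len=24),
                       default_gateway='10.1.1.1'),
    ])
    vn_subnets.add_ipam_subnets(
        IpamSubnetType(subnet=SubnetType(ip_prefix='10.1.2.0', ip_prefix_len=24)))

    vn_subnets.export(sys.stdout)   # each entry is emitted as an <ipam-subnets> child
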
-class DomainLimitsType(GeneratedsSuper):
- """
- DomainLimitsType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, project_limit=None, virtual_network_limit=None, security_group_limit=None, **kwargs):
- self.project_limit = project_limit
- self.virtual_network_limit = virtual_network_limit
- self.security_group_limit = security_group_limit
- def factory(*args_, **kwargs_):
- if DomainLimitsType.subclass:
- return DomainLimitsType.subclass(*args_, **kwargs_)
- else:
- return DomainLimitsType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_project_limit(self): return self.project_limit
- def set_project_limit(self, project_limit): self.project_limit = project_limit
- def get_virtual_network_limit(self): return self.virtual_network_limit
- def set_virtual_network_limit(self, virtual_network_limit): self.virtual_network_limit = virtual_network_limit
- def get_security_group_limit(self): return self.security_group_limit
- def set_security_group_limit(self, security_group_limit): self.security_group_limit = security_group_limit
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.project_limit == other.project_limit and
- self.virtual_network_limit == other.virtual_network_limit and
- self.security_group_limit == other.security_group_limit)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_project_limit (obj.populate_integer ("project_limit"))
- obj.set_virtual_network_limit (obj.populate_integer ("virtual_network_limit"))
- obj.set_security_group_limit (obj.populate_integer ("security_group_limit"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='DomainLimitsType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='DomainLimitsType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DomainLimitsType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='DomainLimitsType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.project_limit is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sproject-limit>%s</%sproject-limit>%s' % (namespace_, self.gds_format_integer(self.project_limit, input_name='project-limit'), namespace_, eol_))
- if self.virtual_network_limit is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svirtual-network-limit>%s</%svirtual-network-limit>%s' % (namespace_, self.gds_format_integer(self.virtual_network_limit, input_name='virtual-network-limit'), namespace_, eol_))
- if self.security_group_limit is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%ssecurity-group-limit>%s</%ssecurity-group-limit>%s' % (namespace_, self.gds_format_integer(self.security_group_limit, input_name='security-group-limit'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.project_limit is not None or
- self.virtual_network_limit is not None or
- self.security_group_limit is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='DomainLimitsType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.project_limit is not None:
- showIndent(outfile, level)
- outfile.write('project_limit=%d,\n' % self.project_limit)
- if self.virtual_network_limit is not None:
- showIndent(outfile, level)
- outfile.write('virtual_network_limit=%d,\n' % self.virtual_network_limit)
- if self.security_group_limit is not None:
- showIndent(outfile, level)
- outfile.write('security_group_limit=%d,\n' % self.security_group_limit)
- def exportDict(self, name_='DomainLimitsType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'project-limit':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'project_limit')
- self.project_limit = ival_
- elif nodeName_ == 'virtual-network-limit':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'virtual_network_limit')
- self.virtual_network_limit = ival_
- elif nodeName_ == 'security-group-limit':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'security_group_limit')
- self.security_group_limit = ival_
-# end class DomainLimitsType
-
-
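The export()/build() pair on these generated classes supports an XML round trip; a short sketch for DomainLimitsType follows. The direct ElementTree usage is illustrative only and assumes the same import path as above; the real helpers went through the module-level parse routines in this generated file.

    import sys
    from xml.etree import ElementTree
    from vnc_api.gen.resource_xsd import DomainLimitsType

    limits = DomainLimitsType(project_limit=10, virtual_network_limit=50,
                              security_group_limit=100)
    limits.export(sys.stdout)       # serializes the three *-limit children

    node = ElementTree.fromstring(
        '<DomainLimitsType><project-limit>10</project-limit></DomainLimitsType>')
    parsed = DomainLimitsType()
    parsed.build(node)              # buildChildren() converts and validates the integer
    print(parsed.get_project_limit())   # -> 10
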
-class PermType(GeneratedsSuper):
- """
- PermType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, owner=None, owner_access=None, group=None, group_access=None, other_access=None, **kwargs):
- self.owner = owner
- self.owner_access = owner_access
- self.group = group
- self.group_access = group_access
- self.other_access = other_access
- def factory(*args_, **kwargs_):
- if PermType.subclass:
- return PermType.subclass(*args_, **kwargs_)
- else:
- return PermType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_owner(self): return self.owner
- def set_owner(self, owner): self.owner = owner
- def get_owner_access(self): return self.owner_access
- def set_owner_access(self, owner_access): self.owner_access = owner_access
- def validate_AccessType(self, value):
- # Validate type AccessType, a restriction on xsd:integer.
- error = False
- if isinstance(value, list):
- v_int = map(int, value)
- v1, v2 = min(v_int), max(v_int)
- else:
- v1, v2 = int(value), int(value)
- error = (0 > v1)
- error |= (v2 > 7)
- if error:
- raise ValueError("AccessType must be in the range 0-7")
- def get_group(self): return self.group
- def set_group(self, group): self.group = group
- def get_group_access(self): return self.group_access
- def set_group_access(self, group_access): self.group_access = group_access
- def get_other_access(self): return self.other_access
- def set_other_access(self, other_access): self.other_access = other_access
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.owner == other.owner and
- self.owner_access == other.owner_access and
- self.group == other.group and
- self.group_access == other.group_access and
- self.other_access == other.other_access)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_owner (obj.populate_string ("owner"))
- obj.set_owner_access (obj.populate_integer ("owner_access"))
- obj.set_group (obj.populate_string ("group"))
- obj.set_group_access (obj.populate_integer ("group_access"))
- obj.set_other_access (obj.populate_integer ("other_access"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='PermType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='PermType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='PermType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='PermType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.owner is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sowner>%s</%sowner>%s' % (namespace_, self.gds_format_string(quote_xml(self.owner).encode(ExternalEncoding), input_name='owner'), namespace_, eol_))
- if self.owner_access is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sowner-access>%s</%sowner-access>%s' % (namespace_, self.gds_format_integer(self.owner_access, input_name='owner-access'), namespace_, eol_))
- if self.group is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sgroup>%s</%sgroup>%s' % (namespace_, self.gds_format_string(quote_xml(self.group).encode(ExternalEncoding), input_name='group'), namespace_, eol_))
- if self.group_access is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sgroup-access>%s</%sgroup-access>%s' % (namespace_, self.gds_format_integer(self.group_access, input_name='group-access'), namespace_, eol_))
- if self.other_access is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sother-access>%s</%sother-access>%s' % (namespace_, self.gds_format_integer(self.other_access, input_name='other-access'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.owner is not None or
- self.owner_access is not None or
- self.group is not None or
- self.group_access is not None or
- self.other_access is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='PermType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.owner is not None:
- showIndent(outfile, level)
- outfile.write('owner=%s,\n' % quote_python(self.owner).encode(ExternalEncoding))
- if self.owner_access is not None:
- showIndent(outfile, level)
- outfile.write('owner_access=%d,\n' % self.owner_access)
- if self.group is not None:
- showIndent(outfile, level)
- outfile.write('group=%s,\n' % quote_python(self.group).encode(ExternalEncoding))
- if self.group_access is not None:
- showIndent(outfile, level)
- outfile.write('group_access=%d,\n' % self.group_access)
- if self.other_access is not None:
- showIndent(outfile, level)
- outfile.write('other_access=%d,\n' % self.other_access)
- def exportDict(self, name_='PermType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'owner':
- owner_ = child_.text
- owner_ = self.gds_validate_string(owner_, node, 'owner')
- self.owner = owner_
- elif nodeName_ == 'owner-access':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'owner_access')
- self.owner_access = ival_
- self.validate_AccessType(self.owner_access) # validate type AccessType
- elif nodeName_ == 'group':
- group_ = child_.text
- group_ = self.gds_validate_string(group_, node, 'group')
- self.group = group_
- elif nodeName_ == 'group-access':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'group_access')
- self.group_access = ival_
- self.validate_AccessType(self.group_access) # validate type AccessType
- elif nodeName_ == 'other-access':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'other_access')
- self.other_access = ival_
- self.validate_AccessType(self.other_access) # validate type AccessType
-# end class PermType
-
-
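PermType's validate_AccessType (shown above) restricts access values to the 0-7 range, presumably an rwx-style permission digit; a small sketch of that behavior, again assuming the import path:

    from vnc_api.gen.resource_xsd import PermType

    perms = PermType(owner='admin', owner_access=7,
                     group='admin-group', group_access=5, other_access=0)
    perms.validate_AccessType(perms.get_owner_access())   # in range, passes silently

    try:
        perms.validate_AccessType(8)
    except ValueError as err:
        print(err)    # "AccessType must be in the range 0-7"
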
-class IdPermsType(GeneratedsSuper):
- """
- IdPermsType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, permissions=None, uuid=None, enable=None, created=None, last_modified=None, description=None, user_visible=True, creator=None, **kwargs):
- if isinstance(permissions, dict):
- obj = PermType(**permissions)
- self.permissions = obj
- else:
- self.permissions = permissions
- if isinstance(uuid, dict):
- obj = UuidType(**uuid)
- self.uuid = obj
- else:
- self.uuid = uuid
- self.enable = enable
- self.created = created
- self.last_modified = last_modified
- self.description = description
- self.user_visible = user_visible
- self.creator = creator
- def factory(*args_, **kwargs_):
- if IdPermsType.subclass:
- return IdPermsType.subclass(*args_, **kwargs_)
- else:
- return IdPermsType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_permissions(self): return self.permissions
- def set_permissions(self, permissions): self.permissions = permissions
- def get_uuid(self): return self.uuid
- def set_uuid(self, uuid): self.uuid = uuid
- def get_enable(self): return self.enable
- def set_enable(self, enable): self.enable = enable
- def get_created(self): return self.created
- def set_created(self, created): self.created = created
- def get_last_modified(self): return self.last_modified
- def set_last_modified(self, last_modified): self.last_modified = last_modified
- def get_description(self): return self.description
- def set_description(self, description): self.description = description
- def get_user_visible(self): return self.user_visible
- def set_user_visible(self, user_visible): self.user_visible = user_visible
- def get_creator(self): return self.creator
- def set_creator(self, creator): self.creator = creator
- def validate_CreatorType(self, value):
- # Validate type CreatorType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'vcenter-plugin', u'test'])
- else:
- error = value not in [u'vcenter-plugin', u'test']
- if error:
- raise ValueError("CreatorType must be one of [u'vcenter-plugin', u'test']")
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.permissions == other.permissions and
- self.uuid == other.uuid and
- self.enable == other.enable and
- self.created == other.created and
- self.last_modified == other.last_modified and
- self.description == other.description and
- self.user_visible == other.user_visible and
- self.creator == other.creator)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_permissions (PermType.populate ())
- obj.set_uuid (UuidType.populate ())
- obj.set_enable (obj.populate_boolean ("enable"))
- obj.set_created (obj.populate_dateTime ("created"))
- obj.set_last_modified (obj.populate_dateTime ("last_modified"))
- obj.set_description (obj.populate_string ("description"))
- obj.set_user_visible (obj.populate_boolean ("user_visible"))
- obj.set_creator (obj.populate_string ("creator"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='IdPermsType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='IdPermsType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IdPermsType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='IdPermsType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.permissions is not None:
- self.permissions.export(outfile, level, namespace_, name_='permissions', pretty_print=pretty_print)
- if self.uuid is not None:
- self.uuid.export(outfile, level, namespace_, name_='uuid', pretty_print=pretty_print)
- if self.enable is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%senable>%s</%senable>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.enable)), input_name='enable'), namespace_, eol_))
- if self.created is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%screated>%s</%screated>%s' % (namespace_, self.gds_format_string(quote_xml(self.created).encode(ExternalEncoding), input_name='created'), namespace_, eol_))
- if self.last_modified is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%slast-modified>%s</%slast-modified>%s' % (namespace_, self.gds_format_string(quote_xml(self.last_modified).encode(ExternalEncoding), input_name='last-modified'), namespace_, eol_))
- if self.description is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sdescription>%s</%sdescription>%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_))
- if self.user_visible is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%suser-visible>%s</%suser-visible>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.user_visible)), input_name='user-visible'), namespace_, eol_))
- if self.creator is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%screator>%s</%screator>%s' % (namespace_, self.gds_format_string(quote_xml(self.creator).encode(ExternalEncoding), input_name='creator'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.permissions is not None or
- self.uuid is not None or
- self.enable is not None or
- self.created is not None or
- self.last_modified is not None or
- self.description is not None or
- self.user_visible is not None or
- self.creator is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='IdPermsType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.permissions is not None:
- showIndent(outfile, level)
- outfile.write('permissions=model_.PermType(\n')
- self.permissions.exportLiteral(outfile, level, name_='permissions')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.uuid is not None:
- showIndent(outfile, level)
- outfile.write('uuid=model_.UuidType(\n')
- self.uuid.exportLiteral(outfile, level, name_='uuid')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.enable is not None:
- showIndent(outfile, level)
- outfile.write('enable=%s,\n' % self.enable)
- if self.created is not None:
- showIndent(outfile, level)
- outfile.write('created=%s,\n' % quote_python(self.created).encode(ExternalEncoding))
- if self.last_modified is not None:
- showIndent(outfile, level)
- outfile.write('last_modified=%s,\n' % quote_python(self.last_modified).encode(ExternalEncoding))
- if self.description is not None:
- showIndent(outfile, level)
- outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding))
- if self.user_visible is not None:
- showIndent(outfile, level)
- outfile.write('user_visible=%s,\n' % self.user_visible)
- if self.creator is not None:
- showIndent(outfile, level)
- outfile.write('creator=%s,\n' % quote_python(self.creator).encode(ExternalEncoding))
- def exportDict(self, name_='IdPermsType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'permissions':
- obj_ = PermType.factory()
- obj_.build(child_)
- self.set_permissions(obj_)
- elif nodeName_ == 'uuid':
- obj_ = UuidType.factory()
- obj_.build(child_)
- self.set_uuid(obj_)
- elif nodeName_ == 'enable':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'enable')
- self.enable = ival_
- elif nodeName_ == 'created':
- created_ = child_.text
- created_ = self.gds_validate_string(created_, node, 'created')
- self.created = created_
- elif nodeName_ == 'last-modified':
- last_modified_ = child_.text
- last_modified_ = self.gds_validate_string(last_modified_, node, 'last_modified')
- self.last_modified = last_modified_
- elif nodeName_ == 'description':
- description_ = child_.text
- description_ = self.gds_validate_string(description_, node, 'description')
- self.description = description_
- elif nodeName_ == 'user-visible':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'user_visible')
- self.user_visible = ival_
- elif nodeName_ == 'creator':
- creator_ = child_.text
- creator_ = self.gds_validate_string(creator_, node, 'creator')
- self.creator = creator_
- self.validate_CreatorType(self.creator) # validate type CreatorType
-# end class IdPermsType
-
-
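IdPermsType nests a PermType (a plain dict is coerced, per the __init__ above) and constrains 'creator' to the values listed in validate_CreatorType. A sketch under the same import-path assumption:

    from vnc_api.gen.resource_xsd import IdPermsType

    id_perms = IdPermsType(
        permissions={'owner': 'admin', 'owner_access': 7},   # coerced to PermType
        enable=True,
        description='sample description',
        creator='test')
    id_perms.validate_CreatorType(id_perms.get_creator())    # 'test' is allowed

    try:
        id_perms.validate_CreatorType('someone-else')
    except ValueError as err:
        print(err)   # CreatorType must be one of [u'vcenter-plugin', u'test']
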
-class PluginProperty(GeneratedsSuper):
- """
- PluginProperty class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, property=None, value=None, **kwargs):
- self.property = property
- self.value = value
- def factory(*args_, **kwargs_):
- if PluginProperty.subclass:
- return PluginProperty.subclass(*args_, **kwargs_)
- else:
- return PluginProperty(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_property(self): return self.property
- def set_property(self, property): self.property = property
- def get_value(self): return self.value
- def set_value(self, value): self.value = value
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.property == other.property and
- self.value == other.value)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_property (obj.populate_string ("property"))
- obj.set_value (obj.populate_string ("value"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='PluginProperty', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='PluginProperty')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='PluginProperty'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='PluginProperty', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.property is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sproperty>%s</%sproperty>%s' % (namespace_, self.gds_format_string(quote_xml(self.property).encode(ExternalEncoding), input_name='property'), namespace_, eol_))
- if self.value is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svalue>%s</%svalue>%s' % (namespace_, self.gds_format_string(quote_xml(self.value).encode(ExternalEncoding), input_name='value'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.property is not None or
- self.value is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='PluginProperty'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.property is not None:
- showIndent(outfile, level)
- outfile.write('property=%s,\n' % quote_python(self.property).encode(ExternalEncoding))
- if self.value is not None:
- showIndent(outfile, level)
- outfile.write('value=%s,\n' % quote_python(self.value).encode(ExternalEncoding))
- def exportDict(self, name_='PluginProperty'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'property':
- property_ = child_.text
- property_ = self.gds_validate_string(property_, node, 'property')
- self.property = property_
- elif nodeName_ == 'value':
- value_ = child_.text
- value_ = self.gds_validate_string(value_, node, 'value')
- self.value = value_
-# end class PluginProperty
-
-
-class PluginProperties(GeneratedsSuper):
- """
- PluginProperties class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, plugin_property=None, **kwargs):
- if (plugin_property is None) or (plugin_property == []):
- self.plugin_property = []
- else:
- if isinstance(plugin_property[0], dict):
- objs = [PluginProperty(**elem) for elem in plugin_property]
- self.plugin_property = objs
- else:
- self.plugin_property = plugin_property
- def factory(*args_, **kwargs_):
- if PluginProperties.subclass:
- return PluginProperties.subclass(*args_, **kwargs_)
- else:
- return PluginProperties(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_plugin_property(self): return self.plugin_property
- def set_plugin_property(self, plugin_property): self.plugin_property = plugin_property
- def add_plugin_property(self, value): self.plugin_property.append(value)
- def insert_plugin_property(self, index, value): self.plugin_property[index] = value
- def delete_plugin_property(self, value): self.plugin_property.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.plugin_property == other.plugin_property)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_plugin_property ([PluginProperty.populate ()])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='PluginProperties', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='PluginProperties')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='PluginProperties'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='PluginProperties', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for plugin_property_ in self.plugin_property:
- if isinstance(plugin_property_, dict):
- plugin_property_ = PluginProperty(**plugin_property_)
- plugin_property_.export(outfile, level, namespace_, name_='plugin-property', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.plugin_property
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='PluginProperties'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('plugin_property=[\n')
- level += 1
- for plugin_property_ in self.plugin_property:
- showIndent(outfile, level)
- outfile.write('model_.PluginProperty(\n')
- plugin_property_.exportLiteral(outfile, level, name_='PluginProperty')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='PluginProperties'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'plugin-property':
- obj_ = PluginProperty.factory()
- obj_.build(child_)
- self.plugin_property.append(obj_)
-# end class PluginProperties
-
-
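Finally, a sketch covering the two plugin classes removed above: PluginProperties holds a list of PluginProperty entries, with plain dicts coerced element-wise as in its __init__. Import path assumed as before.

    import sys
    from vnc_api.gen.resource_xsd import PluginProperties, PluginProperty

    props = PluginProperties(plugin_property=[
        {'property': 'port', 'value': '8443'},          # coerced to PluginProperty
    ])
    props.add_plugin_property(PluginProperty(property='log-level', value='debug'))

    props.export(sys.stdout)     # each entry becomes a <plugin-property> child
    print(props.exportDict())    # {'PluginProperties': {'plugin_property': [...]}}
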
-class QuotaType(GeneratedsSuper):
- """
- QuotaType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, defaults=None, floating_ip=None, instance_ip=None, virtual_machine_interface=None, virtual_network=None, virtual_router=None, virtual_DNS=None, virtual_DNS_record=None, bgp_router=None, network_ipam=None, access_control_list=None, floating_ip_pool=None, service_template=None, service_instance=None, logical_router=None, security_group=None, security_group_rule=None, subnet=None, global_vrouter_config=None, loadbalancer_pool=None, loadbalancer_member=None, loadbalancer_healthmonitor=None, virtual_ip=None, **kwargs):
- self.defaults = defaults
- self.floating_ip = floating_ip
- self.instance_ip = instance_ip
- self.virtual_machine_interface = virtual_machine_interface
- self.virtual_network = virtual_network
- self.virtual_router = virtual_router
- self.virtual_DNS = virtual_DNS
- self.virtual_DNS_record = virtual_DNS_record
- self.bgp_router = bgp_router
- self.network_ipam = network_ipam
- self.access_control_list = access_control_list
- self.floating_ip_pool = floating_ip_pool
- self.service_template = service_template
- self.service_instance = service_instance
- self.logical_router = logical_router
- self.security_group = security_group
- self.security_group_rule = security_group_rule
- self.subnet = subnet
- self.global_vrouter_config = global_vrouter_config
- self.loadbalancer_pool = loadbalancer_pool
- self.loadbalancer_member = loadbalancer_member
- self.loadbalancer_healthmonitor = loadbalancer_healthmonitor
- self.virtual_ip = virtual_ip
- def factory(*args_, **kwargs_):
- if QuotaType.subclass:
- return QuotaType.subclass(*args_, **kwargs_)
- else:
- return QuotaType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_defaults(self): return self.defaults
- def set_defaults(self, defaults): self.defaults = defaults
- def get_floating_ip(self): return self.floating_ip
- def set_floating_ip(self, floating_ip): self.floating_ip = floating_ip
- def get_instance_ip(self): return self.instance_ip
- def set_instance_ip(self, instance_ip): self.instance_ip = instance_ip
- def get_virtual_machine_interface(self): return self.virtual_machine_interface
- def set_virtual_machine_interface(self, virtual_machine_interface): self.virtual_machine_interface = virtual_machine_interface
- def get_virtual_network(self): return self.virtual_network
- def set_virtual_network(self, virtual_network): self.virtual_network = virtual_network
- def get_virtual_router(self): return self.virtual_router
- def set_virtual_router(self, virtual_router): self.virtual_router = virtual_router
- def get_virtual_DNS(self): return self.virtual_DNS
- def set_virtual_DNS(self, virtual_DNS): self.virtual_DNS = virtual_DNS
- def get_virtual_DNS_record(self): return self.virtual_DNS_record
- def set_virtual_DNS_record(self, virtual_DNS_record): self.virtual_DNS_record = virtual_DNS_record
- def get_bgp_router(self): return self.bgp_router
- def set_bgp_router(self, bgp_router): self.bgp_router = bgp_router
- def get_network_ipam(self): return self.network_ipam
- def set_network_ipam(self, network_ipam): self.network_ipam = network_ipam
- def get_access_control_list(self): return self.access_control_list
- def set_access_control_list(self, access_control_list): self.access_control_list = access_control_list
- def get_floating_ip_pool(self): return self.floating_ip_pool
- def set_floating_ip_pool(self, floating_ip_pool): self.floating_ip_pool = floating_ip_pool
- def get_service_template(self): return self.service_template
- def set_service_template(self, service_template): self.service_template = service_template
- def get_service_instance(self): return self.service_instance
- def set_service_instance(self, service_instance): self.service_instance = service_instance
- def get_logical_router(self): return self.logical_router
- def set_logical_router(self, logical_router): self.logical_router = logical_router
- def get_security_group(self): return self.security_group
- def set_security_group(self, security_group): self.security_group = security_group
- def get_security_group_rule(self): return self.security_group_rule
- def set_security_group_rule(self, security_group_rule): self.security_group_rule = security_group_rule
- def get_subnet(self): return self.subnet
- def set_subnet(self, subnet): self.subnet = subnet
- def get_global_vrouter_config(self): return self.global_vrouter_config
- def set_global_vrouter_config(self, global_vrouter_config): self.global_vrouter_config = global_vrouter_config
- def get_loadbalancer_pool(self): return self.loadbalancer_pool
- def set_loadbalancer_pool(self, loadbalancer_pool): self.loadbalancer_pool = loadbalancer_pool
- def get_loadbalancer_member(self): return self.loadbalancer_member
- def set_loadbalancer_member(self, loadbalancer_member): self.loadbalancer_member = loadbalancer_member
- def get_loadbalancer_healthmonitor(self): return self.loadbalancer_healthmonitor
- def set_loadbalancer_healthmonitor(self, loadbalancer_healthmonitor): self.loadbalancer_healthmonitor = loadbalancer_healthmonitor
- def get_virtual_ip(self): return self.virtual_ip
- def set_virtual_ip(self, virtual_ip): self.virtual_ip = virtual_ip
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.defaults == other.defaults and
- self.floating_ip == other.floating_ip and
- self.instance_ip == other.instance_ip and
- self.virtual_machine_interface == other.virtual_machine_interface and
- self.virtual_network == other.virtual_network and
- self.virtual_router == other.virtual_router and
- self.virtual_DNS == other.virtual_DNS and
- self.virtual_DNS_record == other.virtual_DNS_record and
- self.bgp_router == other.bgp_router and
- self.network_ipam == other.network_ipam and
- self.access_control_list == other.access_control_list and
- self.floating_ip_pool == other.floating_ip_pool and
- self.service_template == other.service_template and
- self.service_instance == other.service_instance and
- self.logical_router == other.logical_router and
- self.security_group == other.security_group and
- self.security_group_rule == other.security_group_rule and
- self.subnet == other.subnet and
- self.global_vrouter_config == other.global_vrouter_config and
- self.loadbalancer_pool == other.loadbalancer_pool and
- self.loadbalancer_member == other.loadbalancer_member and
- self.loadbalancer_healthmonitor == other.loadbalancer_healthmonitor and
- self.virtual_ip == other.virtual_ip)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_defaults (obj.populate_integer ("defaults"))
- obj.set_floating_ip (obj.populate_integer ("floating_ip"))
- obj.set_instance_ip (obj.populate_integer ("instance_ip"))
- obj.set_virtual_machine_interface (obj.populate_integer ("virtual_machine_interface"))
- obj.set_virtual_network (obj.populate_integer ("virtual_network"))
- obj.set_virtual_router (obj.populate_integer ("virtual_router"))
- obj.set_virtual_DNS (obj.populate_integer ("virtual_DNS"))
- obj.set_virtual_DNS_record (obj.populate_integer ("virtual_DNS_record"))
- obj.set_bgp_router (obj.populate_integer ("bgp_router"))
- obj.set_network_ipam (obj.populate_integer ("network_ipam"))
- obj.set_access_control_list (obj.populate_integer ("access_control_list"))
- obj.set_floating_ip_pool (obj.populate_integer ("floating_ip_pool"))
- obj.set_service_template (obj.populate_integer ("service_template"))
- obj.set_service_instance (obj.populate_integer ("service_instance"))
- obj.set_logical_router (obj.populate_integer ("logical_router"))
- obj.set_security_group (obj.populate_integer ("security_group"))
- obj.set_security_group_rule (obj.populate_integer ("security_group_rule"))
- obj.set_subnet (obj.populate_integer ("subnet"))
- obj.set_global_vrouter_config (obj.populate_integer ("global_vrouter_config"))
- obj.set_loadbalancer_pool (obj.populate_integer ("loadbalancer_pool"))
- obj.set_loadbalancer_member (obj.populate_integer ("loadbalancer_member"))
- obj.set_loadbalancer_healthmonitor (obj.populate_integer ("loadbalancer_healthmonitor"))
- obj.set_virtual_ip (obj.populate_integer ("virtual_ip"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='QuotaType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='QuotaType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='QuotaType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='QuotaType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.defaults is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sdefaults>%s</%sdefaults>%s' % (namespace_, self.gds_format_integer(self.defaults, input_name='defaults'), namespace_, eol_))
- if self.floating_ip is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sfloating-ip>%s</%sfloating-ip>%s' % (namespace_, self.gds_format_integer(self.floating_ip, input_name='floating-ip'), namespace_, eol_))
- if self.instance_ip is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sinstance-ip>%s</%sinstance-ip>%s' % (namespace_, self.gds_format_integer(self.instance_ip, input_name='instance-ip'), namespace_, eol_))
- if self.virtual_machine_interface is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svirtual-machine-interface>%s</%svirtual-machine-interface>%s' % (namespace_, self.gds_format_integer(self.virtual_machine_interface, input_name='virtual-machine-interface'), namespace_, eol_))
- if self.virtual_network is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svirtual-network>%s</%svirtual-network>%s' % (namespace_, self.gds_format_integer(self.virtual_network, input_name='virtual-network'), namespace_, eol_))
- if self.virtual_router is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svirtual-router>%s</%svirtual-router>%s' % (namespace_, self.gds_format_integer(self.virtual_router, input_name='virtual-router'), namespace_, eol_))
- if self.virtual_DNS is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svirtual-DNS>%s</%svirtual-DNS>%s' % (namespace_, self.gds_format_integer(self.virtual_DNS, input_name='virtual-DNS'), namespace_, eol_))
- if self.virtual_DNS_record is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svirtual-DNS-record>%s</%svirtual-DNS-record>%s' % (namespace_, self.gds_format_integer(self.virtual_DNS_record, input_name='virtual-DNS-record'), namespace_, eol_))
- if self.bgp_router is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sbgp-router>%s</%sbgp-router>%s' % (namespace_, self.gds_format_integer(self.bgp_router, input_name='bgp-router'), namespace_, eol_))
- if self.network_ipam is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%snetwork-ipam>%s</%snetwork-ipam>%s' % (namespace_, self.gds_format_integer(self.network_ipam, input_name='network-ipam'), namespace_, eol_))
- if self.access_control_list is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%saccess-control-list>%s</%saccess-control-list>%s' % (namespace_, self.gds_format_integer(self.access_control_list, input_name='access-control-list'), namespace_, eol_))
- if self.floating_ip_pool is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sfloating-ip-pool>%s</%sfloating-ip-pool>%s' % (namespace_, self.gds_format_integer(self.floating_ip_pool, input_name='floating-ip-pool'), namespace_, eol_))
- if self.service_template is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sservice-template>%s</%sservice-template>%s' % (namespace_, self.gds_format_integer(self.service_template, input_name='service-template'), namespace_, eol_))
- if self.service_instance is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sservice-instance>%s</%sservice-instance>%s' % (namespace_, self.gds_format_integer(self.service_instance, input_name='service-instance'), namespace_, eol_))
- if self.logical_router is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%slogical-router>%s</%slogical-router>%s' % (namespace_, self.gds_format_integer(self.logical_router, input_name='logical-router'), namespace_, eol_))
- if self.security_group is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%ssecurity-group>%s</%ssecurity-group>%s' % (namespace_, self.gds_format_integer(self.security_group, input_name='security-group'), namespace_, eol_))
- if self.security_group_rule is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%ssecurity-group-rule>%s</%ssecurity-group-rule>%s' % (namespace_, self.gds_format_integer(self.security_group_rule, input_name='security-group-rule'), namespace_, eol_))
- if self.subnet is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%ssubnet>%s</%ssubnet>%s' % (namespace_, self.gds_format_integer(self.subnet, input_name='subnet'), namespace_, eol_))
- if self.global_vrouter_config is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sglobal-vrouter-config>%s</%sglobal-vrouter-config>%s' % (namespace_, self.gds_format_integer(self.global_vrouter_config, input_name='global-vrouter-config'), namespace_, eol_))
- if self.loadbalancer_pool is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sloadbalancer-pool>%s</%sloadbalancer-pool>%s' % (namespace_, self.gds_format_integer(self.loadbalancer_pool, input_name='loadbalancer-pool'), namespace_, eol_))
- if self.loadbalancer_member is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sloadbalancer-member>%s</%sloadbalancer-member>%s' % (namespace_, self.gds_format_integer(self.loadbalancer_member, input_name='loadbalancer-member'), namespace_, eol_))
- if self.loadbalancer_healthmonitor is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sloadbalancer-healthmonitor>%s</%sloadbalancer-healthmonitor>%s' % (namespace_, self.gds_format_integer(self.loadbalancer_healthmonitor, input_name='loadbalancer-healthmonitor'), namespace_, eol_))
- if self.virtual_ip is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svirtual-ip>%s</%svirtual-ip>%s' % (namespace_, self.gds_format_integer(self.virtual_ip, input_name='virtual-ip'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.defaults is not None or
- self.floating_ip is not None or
- self.instance_ip is not None or
- self.virtual_machine_interface is not None or
- self.virtual_network is not None or
- self.virtual_router is not None or
- self.virtual_DNS is not None or
- self.virtual_DNS_record is not None or
- self.bgp_router is not None or
- self.network_ipam is not None or
- self.access_control_list is not None or
- self.floating_ip_pool is not None or
- self.service_template is not None or
- self.service_instance is not None or
- self.logical_router is not None or
- self.security_group is not None or
- self.security_group_rule is not None or
- self.subnet is not None or
- self.global_vrouter_config is not None or
- self.loadbalancer_pool is not None or
- self.loadbalancer_member is not None or
- self.loadbalancer_healthmonitor is not None or
- self.virtual_ip is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='QuotaType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.defaults is not None:
- showIndent(outfile, level)
- outfile.write('defaults=%d,\n' % self.defaults)
- if self.floating_ip is not None:
- showIndent(outfile, level)
- outfile.write('floating_ip=%d,\n' % self.floating_ip)
- if self.instance_ip is not None:
- showIndent(outfile, level)
- outfile.write('instance_ip=%d,\n' % self.instance_ip)
- if self.virtual_machine_interface is not None:
- showIndent(outfile, level)
- outfile.write('virtual_machine_interface=%d,\n' % self.virtual_machine_interface)
- if self.virtual_network is not None:
- showIndent(outfile, level)
- outfile.write('virtual_network=%d,\n' % self.virtual_network)
- if self.virtual_router is not None:
- showIndent(outfile, level)
- outfile.write('virtual_router=%d,\n' % self.virtual_router)
- if self.virtual_DNS is not None:
- showIndent(outfile, level)
- outfile.write('virtual_DNS=%d,\n' % self.virtual_DNS)
- if self.virtual_DNS_record is not None:
- showIndent(outfile, level)
- outfile.write('virtual_DNS_record=%d,\n' % self.virtual_DNS_record)
- if self.bgp_router is not None:
- showIndent(outfile, level)
- outfile.write('bgp_router=%d,\n' % self.bgp_router)
- if self.network_ipam is not None:
- showIndent(outfile, level)
- outfile.write('network_ipam=%d,\n' % self.network_ipam)
- if self.access_control_list is not None:
- showIndent(outfile, level)
- outfile.write('access_control_list=%d,\n' % self.access_control_list)
- if self.floating_ip_pool is not None:
- showIndent(outfile, level)
- outfile.write('floating_ip_pool=%d,\n' % self.floating_ip_pool)
- if self.service_template is not None:
- showIndent(outfile, level)
- outfile.write('service_template=%d,\n' % self.service_template)
- if self.service_instance is not None:
- showIndent(outfile, level)
- outfile.write('service_instance=%d,\n' % self.service_instance)
- if self.logical_router is not None:
- showIndent(outfile, level)
- outfile.write('logical_router=%d,\n' % self.logical_router)
- if self.security_group is not None:
- showIndent(outfile, level)
- outfile.write('security_group=%d,\n' % self.security_group)
- if self.security_group_rule is not None:
- showIndent(outfile, level)
- outfile.write('security_group_rule=%d,\n' % self.security_group_rule)
- if self.subnet is not None:
- showIndent(outfile, level)
- outfile.write('subnet=%d,\n' % self.subnet)
- if self.global_vrouter_config is not None:
- showIndent(outfile, level)
- outfile.write('global_vrouter_config=%d,\n' % self.global_vrouter_config)
- if self.loadbalancer_pool is not None:
- showIndent(outfile, level)
- outfile.write('loadbalancer_pool=%d,\n' % self.loadbalancer_pool)
- if self.loadbalancer_member is not None:
- showIndent(outfile, level)
- outfile.write('loadbalancer_member=%d,\n' % self.loadbalancer_member)
- if self.loadbalancer_healthmonitor is not None:
- showIndent(outfile, level)
- outfile.write('loadbalancer_healthmonitor=%d,\n' % self.loadbalancer_healthmonitor)
- if self.virtual_ip is not None:
- showIndent(outfile, level)
- outfile.write('virtual_ip=%d,\n' % self.virtual_ip)
- def exportDict(self, name_='QuotaType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'defaults':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'defaults')
- self.defaults = ival_
- elif nodeName_ == 'floating-ip':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'floating_ip')
- self.floating_ip = ival_
- elif nodeName_ == 'instance-ip':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'instance_ip')
- self.instance_ip = ival_
- elif nodeName_ == 'virtual-machine-interface':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'virtual_machine_interface')
- self.virtual_machine_interface = ival_
- elif nodeName_ == 'virtual-network':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'virtual_network')
- self.virtual_network = ival_
- elif nodeName_ == 'virtual-router':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'virtual_router')
- self.virtual_router = ival_
- elif nodeName_ == 'virtual-DNS':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'virtual_DNS')
- self.virtual_DNS = ival_
- elif nodeName_ == 'virtual-DNS-record':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'virtual_DNS_record')
- self.virtual_DNS_record = ival_
- elif nodeName_ == 'bgp-router':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'bgp_router')
- self.bgp_router = ival_
- elif nodeName_ == 'network-ipam':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'network_ipam')
- self.network_ipam = ival_
- elif nodeName_ == 'access-control-list':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'access_control_list')
- self.access_control_list = ival_
- elif nodeName_ == 'floating-ip-pool':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'floating_ip_pool')
- self.floating_ip_pool = ival_
- elif nodeName_ == 'service-template':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'service_template')
- self.service_template = ival_
- elif nodeName_ == 'service-instance':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'service_instance')
- self.service_instance = ival_
- elif nodeName_ == 'logical-router':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'logical_router')
- self.logical_router = ival_
- elif nodeName_ == 'security-group':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'security_group')
- self.security_group = ival_
- elif nodeName_ == 'security-group-rule':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'security_group_rule')
- self.security_group_rule = ival_
- elif nodeName_ == 'subnet':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'subnet')
- self.subnet = ival_
- elif nodeName_ == 'global-vrouter-config':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'global_vrouter_config')
- self.global_vrouter_config = ival_
- elif nodeName_ == 'loadbalancer-pool':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'loadbalancer_pool')
- self.loadbalancer_pool = ival_
- elif nodeName_ == 'loadbalancer-member':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'loadbalancer_member')
- self.loadbalancer_member = ival_
- elif nodeName_ == 'loadbalancer-healthmonitor':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'loadbalancer_healthmonitor')
- self.loadbalancer_healthmonitor = ival_
- elif nodeName_ == 'virtual-ip':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'virtual_ip')
- self.virtual_ip = ival_
-# end class QuotaType
-
-
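-# Illustrative usage note (not part of the generated file): QuotaType above
-# follows the usual generateDS pattern -- a keyword constructor, per-field
-# get_/set_ accessors, XML serialization via export()/exportChildren(), a
-# dict form via exportDict(), and XML parsing via build() on an ElementTree
-# node. A minimal sketch, assuming sys.stdout as the output stream:
-#
-#     q = QuotaType(virtual_network=64, floating_ip=128)
-#     q.export(sys.stdout, level=1, name_='QuotaType')
-#     q.exportDict()   # {'QuotaType': {'virtual_network': 64, ...}}
-#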
-class config_root_global_system_config(GeneratedsSuper):
- """
- config_root_global_system_config class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if config_root_global_system_config.subclass:
- return config_root_global_system_config.subclass(*args_, **kwargs_)
- else:
- return config_root_global_system_config(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='config-root-global-system-config', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='config-root-global-system-config')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='config-root-global-system-config'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='config-root-global-system-config', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='config-root-global-system-config'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='config-root-global-system-config'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class config_root_global_system_config
-
-
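-# Illustrative note (not part of the generated file): this class and the
-# link classes that follow (global_system_config_bgp_router through
-# virtual_machine_interface_qos_forwarding_class) are empty link-metadata
-# placeholders generated from vnc_cfg.xsd. They carry no fields, so
-# __eq__ always returns True, hasContent_ always evaluates False, and
-# export() emits a bare, self-closing element. A hedged sketch, assuming
-# sys.stdout as the output stream:
-#
-#     config_root_global_system_config().export(sys.stdout)
-#     # -> <config-root-global-system-config/>
-#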
-class global_system_config_bgp_router(GeneratedsSuper):
- """
- global_system_config_bgp_router class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if global_system_config_bgp_router.subclass:
- return global_system_config_bgp_router.subclass(*args_, **kwargs_)
- else:
- return global_system_config_bgp_router(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='global-system-config-bgp-router', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='global-system-config-bgp-router')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='global-system-config-bgp-router'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='global-system-config-bgp-router', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='global-system-config-bgp-router'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='global-system-config-bgp-router'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class global_system_config_bgp_router
-
-
-class global_system_config_global_vrouter_config(GeneratedsSuper):
- """
- global_system_config_global_vrouter_config class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if global_system_config_global_vrouter_config.subclass:
- return global_system_config_global_vrouter_config.subclass(*args_, **kwargs_)
- else:
- return global_system_config_global_vrouter_config(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='global-system-config-global-vrouter-config', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='global-system-config-global-vrouter-config')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='global-system-config-global-vrouter-config'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='global-system-config-global-vrouter-config', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='global-system-config-global-vrouter-config'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='global-system-config-global-vrouter-config'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class global_system_config_global_vrouter_config
-
-
-class config_root_domain(GeneratedsSuper):
- """
- config_root_domain class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if config_root_domain.subclass:
- return config_root_domain.subclass(*args_, **kwargs_)
- else:
- return config_root_domain(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='config-root-domain', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='config-root-domain')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='config-root-domain'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='config-root-domain', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='config-root-domain'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='config-root-domain'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class config_root_domain
-
-
-class domain_project(GeneratedsSuper):
- """
- domain_project class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if domain_project.subclass:
- return domain_project.subclass(*args_, **kwargs_)
- else:
- return domain_project(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='domain-project', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='domain-project')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='domain-project'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='domain-project', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='domain-project'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='domain-project'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class domain_project
-
-
-class domain_namespace(GeneratedsSuper):
- """
- domain_namespace class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if domain_namespace.subclass:
- return domain_namespace.subclass(*args_, **kwargs_)
- else:
- return domain_namespace(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='domain-namespace', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='domain-namespace')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='domain-namespace'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='domain-namespace', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='domain-namespace'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='domain-namespace'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class domain_namespace
-
-
-class project_security_group(GeneratedsSuper):
- """
- project_security_group class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if project_security_group.subclass:
- return project_security_group.subclass(*args_, **kwargs_)
- else:
- return project_security_group(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='project-security-group', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='project-security-group')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='project-security-group'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='project-security-group', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='project-security-group'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='project-security-group'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class project_security_group
-
-
-class project_virtual_network(GeneratedsSuper):
- """
- project_virtual_network class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if project_virtual_network.subclass:
- return project_virtual_network.subclass(*args_, **kwargs_)
- else:
- return project_virtual_network(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='project-virtual-network', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='project-virtual-network')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='project-virtual-network'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='project-virtual-network', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='project-virtual-network'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='project-virtual-network'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class project_virtual_network
-
-
-class project_qos_queue(GeneratedsSuper):
- """
- project_qos_queue class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if project_qos_queue.subclass:
- return project_qos_queue.subclass(*args_, **kwargs_)
- else:
- return project_qos_queue(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='project-qos-queue', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='project-qos-queue')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='project-qos-queue'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='project-qos-queue', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='project-qos-queue'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='project-qos-queue'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class project_qos_queue
-
-
-class project_qos_forwarding_class(GeneratedsSuper):
- """
- project_qos_forwarding_class class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if project_qos_forwarding_class.subclass:
- return project_qos_forwarding_class.subclass(*args_, **kwargs_)
- else:
- return project_qos_forwarding_class(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='project-qos-forwarding-class', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='project-qos-forwarding-class')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='project-qos-forwarding-class'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='project-qos-forwarding-class', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='project-qos-forwarding-class'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='project-qos-forwarding-class'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class project_qos_forwarding_class
-
-
-class qos_forwarding_class_qos_queue(GeneratedsSuper):
- """
- qos_forwarding_class_qos_queue class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if qos_forwarding_class_qos_queue.subclass:
- return qos_forwarding_class_qos_queue.subclass(*args_, **kwargs_)
- else:
- return qos_forwarding_class_qos_queue(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='qos-forwarding-class-qos-queue', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='qos-forwarding-class-qos-queue')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='qos-forwarding-class-qos-queue'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='qos-forwarding-class-qos-queue', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='qos-forwarding-class-qos-queue'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='qos-forwarding-class-qos-queue'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class qos_forwarding_class_qos_queue
-
-
-class virtual_network_qos_forwarding_class(GeneratedsSuper):
- """
- virtual_network_qos_forwarding_class class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_network_qos_forwarding_class.subclass:
- return virtual_network_qos_forwarding_class.subclass(*args_, **kwargs_)
- else:
- return virtual_network_qos_forwarding_class(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-network-qos-forwarding-class', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-network-qos-forwarding-class')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-network-qos-forwarding-class'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-network-qos-forwarding-class', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-network-qos-forwarding-class'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-network-qos-forwarding-class'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_network_qos_forwarding_class
-
-
-class virtual_machine_interface_qos_forwarding_class(GeneratedsSuper):
- """
- virtual_machine_interface_qos_forwarding_class class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_machine_interface_qos_forwarding_class.subclass:
- return virtual_machine_interface_qos_forwarding_class.subclass(*args_, **kwargs_)
- else:
- return virtual_machine_interface_qos_forwarding_class(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-machine-interface-qos-forwarding-class', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-machine-interface-qos-forwarding-class')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-machine-interface-qos-forwarding-class'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-machine-interface-qos-forwarding-class', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-machine-interface-qos-forwarding-class'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-machine-interface-qos-forwarding-class'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_machine_interface_qos_forwarding_class
-
-
-class VirtualNetworkType(GeneratedsSuper):
- """
- VirtualNetworkType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, allow_transit=None, network_id=None, vxlan_network_identifier=None, forwarding_mode=None, rpf=None, **kwargs):
- self.allow_transit = allow_transit
- self.network_id = network_id
- self.vxlan_network_identifier = vxlan_network_identifier
- self.forwarding_mode = forwarding_mode
- self.rpf = rpf
- def factory(*args_, **kwargs_):
- if VirtualNetworkType.subclass:
- return VirtualNetworkType.subclass(*args_, **kwargs_)
- else:
- return VirtualNetworkType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_allow_transit(self): return self.allow_transit
- def set_allow_transit(self, allow_transit): self.allow_transit = allow_transit
- def get_network_id(self): return self.network_id
- def set_network_id(self, network_id): self.network_id = network_id
- def get_vxlan_network_identifier(self): return self.vxlan_network_identifier
- def set_vxlan_network_identifier(self, vxlan_network_identifier): self.vxlan_network_identifier = vxlan_network_identifier
- def validate_VxlanNetworkIdentifierType(self, value):
- # Validate type VxlanNetworkIdentifierType, a restriction on xsd:integer.
- error = False
- if isinstance(value, list):
- v_int = map(int, value)
- v1, v2 = min(v_int), max(v_int)
- else:
- v1, v2 = int(value), int(value)
- error = (1 > v1)
- error |= (v2 > 1048575)
- if error:
- raise ValueError("VxlanNetworkIdentifierType must be in the range 1-1048575")
- def get_forwarding_mode(self): return self.forwarding_mode
- def set_forwarding_mode(self, forwarding_mode): self.forwarding_mode = forwarding_mode
- def validate_ForwardingModeType(self, value):
- # Validate type ForwardingModeType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'l2_l3', u'l2'])
- else:
- error = value not in [u'l2_l3', u'l2']
- if error:
- raise ValueError("ForwardingModeType must be one of [u'l2_l3', u'l2']")
- def get_rpf(self): return self.rpf
- def set_rpf(self, rpf): self.rpf = rpf
- def validate_RpfModeType(self, value):
- # Validate type RpfModeType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'enable', u'disable'])
- else:
- error = value not in [u'enable', u'disable']
- if error:
- raise ValueError("RpfModeType must be one of [u'enable', u'disable']")
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.allow_transit == other.allow_transit and
- self.network_id == other.network_id and
- self.vxlan_network_identifier == other.vxlan_network_identifier and
- self.forwarding_mode == other.forwarding_mode and
- self.rpf == other.rpf)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_allow_transit (obj.populate_boolean ("allow_transit"))
- obj.set_network_id (obj.populate_integer ("network_id"))
- obj.set_vxlan_network_identifier (obj.populate_integer ("vxlan_network_identifier"))
- obj.set_forwarding_mode (obj.populate_string ("forwarding_mode"))
- obj.set_rpf (obj.populate_string ("rpf"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='VirtualNetworkType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='VirtualNetworkType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VirtualNetworkType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='VirtualNetworkType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.allow_transit is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sallow-transit>%s</%sallow-transit>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.allow_transit)), input_name='allow-transit'), namespace_, eol_))
- if self.network_id is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%snetwork-id>%s</%snetwork-id>%s' % (namespace_, self.gds_format_integer(self.network_id, input_name='network-id'), namespace_, eol_))
- if self.vxlan_network_identifier is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svxlan-network-identifier>%s</%svxlan-network-identifier>%s' % (namespace_, self.gds_format_integer(self.vxlan_network_identifier, input_name='vxlan-network-identifier'), namespace_, eol_))
- if self.forwarding_mode is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sforwarding-mode>%s</%sforwarding-mode>%s' % (namespace_, self.gds_format_string(quote_xml(self.forwarding_mode).encode(ExternalEncoding), input_name='forwarding-mode'), namespace_, eol_))
- if self.rpf is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%srpf>%s</%srpf>%s' % (namespace_, self.gds_format_string(quote_xml(self.rpf).encode(ExternalEncoding), input_name='rpf'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.allow_transit is not None or
- self.network_id is not None or
- self.vxlan_network_identifier is not None or
- self.forwarding_mode is not None or
- self.rpf is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='VirtualNetworkType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.allow_transit is not None:
- showIndent(outfile, level)
- outfile.write('allow_transit=%s,\n' % self.allow_transit)
- if self.network_id is not None:
- showIndent(outfile, level)
- outfile.write('network_id=%d,\n' % self.network_id)
- if self.vxlan_network_identifier is not None:
- showIndent(outfile, level)
- outfile.write('vxlan_network_identifier=%d,\n' % self.vxlan_network_identifier)
- if self.forwarding_mode is not None:
- showIndent(outfile, level)
- outfile.write('forwarding_mode=%s,\n' % quote_python(self.forwarding_mode).encode(ExternalEncoding))
- if self.rpf is not None:
- showIndent(outfile, level)
- outfile.write('rpf=%s,\n' % quote_python(self.rpf).encode(ExternalEncoding))
- def exportDict(self, name_='VirtualNetworkType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'allow-transit':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'allow_transit')
- self.allow_transit = ival_
- elif nodeName_ == 'network-id':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'network_id')
- self.network_id = ival_
- elif nodeName_ == 'vxlan-network-identifier':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'vxlan_network_identifier')
- self.vxlan_network_identifier = ival_
- self.validate_VxlanNetworkIdentifierType(self.vxlan_network_identifier) # validate type VxlanNetworkIdentifierType
- elif nodeName_ == 'forwarding-mode':
- forwarding_mode_ = child_.text
- forwarding_mode_ = self.gds_validate_string(forwarding_mode_, node, 'forwarding_mode')
- self.forwarding_mode = forwarding_mode_
- self.validate_ForwardingModeType(self.forwarding_mode) # validate type ForwardingModeType
- elif nodeName_ == 'rpf':
- rpf_ = child_.text
- rpf_ = self.gds_validate_string(rpf_, node, 'rpf')
- self.rpf = rpf_
- self.validate_RpfModeType(self.rpf) # validate type RpfModeType
-# end class VirtualNetworkType
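
A minimal usage sketch for the VirtualNetworkType bindings above. It is not part of the removed file: the field values are made up, and it assumes the module-level helpers the class references (the GeneratedsSuper formatters, showIndent, quote_xml, ExternalEncoding) behave as their names suggest in the same removed Python 2 module.

    import sys

    # All fields are optional keyword arguments.
    vn_props = VirtualNetworkType(allow_transit=False,
                                  network_id=10,
                                  vxlan_network_identifier=4096,
                                  forwarding_mode='l2_l3',
                                  rpf='enable')

    # The validators raise ValueError outside the schema restrictions
    # (VXLAN id 1-1048575, forwarding mode l2_l3/l2, rpf enable/disable).
    vn_props.validate_VxlanNetworkIdentifierType(vn_props.get_vxlan_network_identifier())
    vn_props.validate_ForwardingModeType(vn_props.get_forwarding_mode())
    vn_props.validate_RpfModeType(vn_props.get_rpf())

    # Serialize to XML on any file-like object.
    vn_props.export(sys.stdout)
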
-
-
-class RouteTargetList(GeneratedsSuper):
- """
- RouteTargetList class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, route_target=None, **kwargs):
- if (route_target is None) or (route_target == []):
- self.route_target = []
- else:
- self.route_target = route_target
- def factory(*args_, **kwargs_):
- if RouteTargetList.subclass:
- return RouteTargetList.subclass(*args_, **kwargs_)
- else:
- return RouteTargetList(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_route_target(self): return self.route_target
- def set_route_target(self, route_target): self.route_target = route_target
- def add_route_target(self, value): self.route_target.append(value)
-    def insert_route_target(self, index, value): self.route_target[index] = value  # replaces the entry at index rather than inserting before it
- def delete_route_target(self, value): self.route_target.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.route_target == other.route_target)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_route_target ([obj.populate_string ("route_target")])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='RouteTargetList', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='RouteTargetList')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RouteTargetList'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='RouteTargetList', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for route_target_ in self.route_target:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sroute-target>%s</%sroute-target>%s' % (namespace_, self.gds_format_string(quote_xml(route_target_).encode(ExternalEncoding), input_name='route-target'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.route_target
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='RouteTargetList'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('route_target=[\n')
- level += 1
- for route_target_ in self.route_target:
- showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(route_target_).encode(ExternalEncoding))
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='RouteTargetList'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'route-target':
- route_target_ = child_.text
- route_target_ = self.gds_validate_string(route_target_, node, 'route_target')
- self.route_target.append(route_target_)
-# end class RouteTargetList
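
RouteTargetList wraps a plain list of strings and adds add_/insert_/delete_ accessors. The fragment below is only a sketch against the class as shown; the route-target values are illustrative.

    rt_list = RouteTargetList(route_target=['target:64512:1'])
    rt_list.add_route_target('target:64512:2')

    # exportDict round-trips the object through json.dumps/json.loads
    # into a plain dict keyed by the element name.
    print rt_list.exportDict()
    # -> {'RouteTargetList': {'route_target': ['target:64512:1', 'target:64512:2']}}
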
-
-
-class project_network_ipam(GeneratedsSuper):
- """
- project_network_ipam class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if project_network_ipam.subclass:
- return project_network_ipam.subclass(*args_, **kwargs_)
- else:
- return project_network_ipam(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='project-network-ipam', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='project-network-ipam')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='project-network-ipam'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='project-network-ipam', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='project-network-ipam'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='project-network-ipam'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class project_network_ipam
-
-
-class project_network_policy(GeneratedsSuper):
- """
- project_network_policy class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if project_network_policy.subclass:
- return project_network_policy.subclass(*args_, **kwargs_)
- else:
- return project_network_policy(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='project-network-policy', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='project-network-policy')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='project-network-policy'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='project-network-policy', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='project-network-policy'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='project-network-policy'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class project_network_policy
-
-
-class virtual_network_access_control_list(GeneratedsSuper):
- """
- virtual_network_access_control_list class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_network_access_control_list.subclass:
- return virtual_network_access_control_list.subclass(*args_, **kwargs_)
- else:
- return virtual_network_access_control_list(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-network-access-control-list', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-network-access-control-list')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-network-access-control-list'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-network-access-control-list', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-network-access-control-list'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-network-access-control-list'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_network_access_control_list
-
-
-class security_group_access_control_list(GeneratedsSuper):
- """
- security_group_access_control_list class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if security_group_access_control_list.subclass:
- return security_group_access_control_list.subclass(*args_, **kwargs_)
- else:
- return security_group_access_control_list(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='security-group-access-control-list', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='security-group-access-control-list')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='security-group-access-control-list'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='security-group-access-control-list', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='security-group-access-control-list'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='security-group-access-control-list'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class security_group_access_control_list
-
-
-class virtual_machine_interface_security_group(GeneratedsSuper):
- """
- virtual_machine_interface_security_group class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_machine_interface_security_group.subclass:
- return virtual_machine_interface_security_group.subclass(*args_, **kwargs_)
- else:
- return virtual_machine_interface_security_group(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-machine-interface-security-group', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-machine-interface-security-group')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-machine-interface-security-group'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-machine-interface-security-group', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-machine-interface-security-group'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-machine-interface-security-group'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_machine_interface_security_group
-
-
-class VrfAssignRuleType(GeneratedsSuper):
- """
- VrfAssignRuleType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, match_condition=None, vlan_tag=None, routing_instance=None, ignore_acl=None, **kwargs):
- if isinstance(match_condition, dict):
- obj = MatchConditionType(**match_condition)
- self.match_condition = obj
- else:
- self.match_condition = match_condition
- self.vlan_tag = vlan_tag
- self.routing_instance = routing_instance
- self.ignore_acl = ignore_acl
- def factory(*args_, **kwargs_):
- if VrfAssignRuleType.subclass:
- return VrfAssignRuleType.subclass(*args_, **kwargs_)
- else:
- return VrfAssignRuleType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_match_condition(self): return self.match_condition
- def set_match_condition(self, match_condition): self.match_condition = match_condition
- def get_vlan_tag(self): return self.vlan_tag
- def set_vlan_tag(self, vlan_tag): self.vlan_tag = vlan_tag
- def get_routing_instance(self): return self.routing_instance
- def set_routing_instance(self, routing_instance): self.routing_instance = routing_instance
- def get_ignore_acl(self): return self.ignore_acl
- def set_ignore_acl(self, ignore_acl): self.ignore_acl = ignore_acl
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.match_condition == other.match_condition and
- self.vlan_tag == other.vlan_tag and
- self.routing_instance == other.routing_instance and
- self.ignore_acl == other.ignore_acl)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_match_condition (MatchConditionType.populate ())
- obj.set_vlan_tag (obj.populate_integer ("vlan_tag"))
- obj.set_routing_instance (obj.populate_string ("routing_instance"))
- obj.set_ignore_acl (obj.populate_boolean ("ignore_acl"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='VrfAssignRuleType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='VrfAssignRuleType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VrfAssignRuleType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='VrfAssignRuleType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.match_condition is not None:
- self.match_condition.export(outfile, level, namespace_, name_='match-condition', pretty_print=pretty_print)
- if self.vlan_tag is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svlan-tag>%s</%svlan-tag>%s' % (namespace_, self.gds_format_integer(self.vlan_tag, input_name='vlan-tag'), namespace_, eol_))
- if self.routing_instance is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%srouting-instance>%s</%srouting-instance>%s' % (namespace_, self.gds_format_string(quote_xml(self.routing_instance).encode(ExternalEncoding), input_name='routing-instance'), namespace_, eol_))
- if self.ignore_acl is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%signore-acl>%s</%signore-acl>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.ignore_acl)), input_name='ignore-acl'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.match_condition is not None or
- self.vlan_tag is not None or
- self.routing_instance is not None or
- self.ignore_acl is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='VrfAssignRuleType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.match_condition is not None:
- showIndent(outfile, level)
- outfile.write('match_condition=model_.MatchConditionType(\n')
- self.match_condition.exportLiteral(outfile, level, name_='match_condition')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.vlan_tag is not None:
- showIndent(outfile, level)
- outfile.write('vlan_tag=%d,\n' % self.vlan_tag)
- if self.routing_instance is not None:
- showIndent(outfile, level)
- outfile.write('routing_instance=%s,\n' % quote_python(self.routing_instance).encode(ExternalEncoding))
- if self.ignore_acl is not None:
- showIndent(outfile, level)
- outfile.write('ignore_acl=%s,\n' % self.ignore_acl)
- def exportDict(self, name_='VrfAssignRuleType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'match-condition':
- obj_ = MatchConditionType.factory()
- obj_.build(child_)
- self.set_match_condition(obj_)
- elif nodeName_ == 'vlan-tag':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'vlan_tag')
- self.vlan_tag = ival_
- elif nodeName_ == 'routing-instance':
- routing_instance_ = child_.text
- routing_instance_ = self.gds_validate_string(routing_instance_, node, 'routing_instance')
- self.routing_instance = routing_instance_
- elif nodeName_ == 'ignore-acl':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'ignore_acl')
- self.ignore_acl = ival_
-# end class VrfAssignRuleType
-
-
-class VrfAssignTableType(GeneratedsSuper):
- """
- VrfAssignTableType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, vrf_assign_rule=None, **kwargs):
- if (vrf_assign_rule is None) or (vrf_assign_rule == []):
- self.vrf_assign_rule = []
- else:
- if isinstance(vrf_assign_rule[0], dict):
- objs = [VrfAssignRuleType(**elem) for elem in vrf_assign_rule]
- self.vrf_assign_rule = objs
- else:
- self.vrf_assign_rule = vrf_assign_rule
- def factory(*args_, **kwargs_):
- if VrfAssignTableType.subclass:
- return VrfAssignTableType.subclass(*args_, **kwargs_)
- else:
- return VrfAssignTableType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_vrf_assign_rule(self): return self.vrf_assign_rule
- def set_vrf_assign_rule(self, vrf_assign_rule): self.vrf_assign_rule = vrf_assign_rule
- def add_vrf_assign_rule(self, value): self.vrf_assign_rule.append(value)
- def insert_vrf_assign_rule(self, index, value): self.vrf_assign_rule[index] = value
- def delete_vrf_assign_rule(self, value): self.vrf_assign_rule.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.vrf_assign_rule == other.vrf_assign_rule)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_vrf_assign_rule ([VrfAssignRuleType.populate ()])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='VrfAssignTableType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='VrfAssignTableType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VrfAssignTableType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='VrfAssignTableType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for vrf_assign_rule_ in self.vrf_assign_rule:
- if isinstance(vrf_assign_rule_, dict):
- vrf_assign_rule_ = VrfAssignRuleType(**vrf_assign_rule_)
- vrf_assign_rule_.export(outfile, level, namespace_, name_='vrf-assign-rule', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.vrf_assign_rule
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='VrfAssignTableType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('vrf_assign_rule=[\n')
- level += 1
- for vrf_assign_rule_ in self.vrf_assign_rule:
- showIndent(outfile, level)
- outfile.write('model_.VrfAssignRuleType(\n')
- vrf_assign_rule_.exportLiteral(outfile, level, name_='VrfAssignRuleType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='VrfAssignTableType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'vrf-assign-rule':
- obj_ = VrfAssignRuleType.factory()
- obj_.build(child_)
- self.vrf_assign_rule.append(obj_)
-# end class VrfAssignTableType
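
The composite constructors above accept plain dicts for nested fields and promote them to the corresponding generated class, as VrfAssignTableType does for its vrf-assign-rule entries. A short sketch with made-up VLAN and routing-instance values; match_condition is left unset rather than guessing MatchConditionType's fields.

    table = VrfAssignTableType(vrf_assign_rule=[{
        'match_condition': None,      # or a dict of MatchConditionType kwargs
        'vlan_tag': 100,
        'routing_instance': 'default-domain:default-project:vn1:vn1',
        'ignore_acl': False,
    }])

    rule = table.get_vrf_assign_rule()[0]
    assert isinstance(rule, VrfAssignRuleType)   # dict entry promoted to an object
    assert rule.get_vlan_tag() == 100
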
-
-
-class InterfaceMirrorType(GeneratedsSuper):
- """
- InterfaceMirrorType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, traffic_direction=None, mirror_to=None, **kwargs):
- self.traffic_direction = traffic_direction
- if isinstance(mirror_to, dict):
- obj = MirrorActionType(**mirror_to)
- self.mirror_to = obj
- else:
- self.mirror_to = mirror_to
- def factory(*args_, **kwargs_):
- if InterfaceMirrorType.subclass:
- return InterfaceMirrorType.subclass(*args_, **kwargs_)
- else:
- return InterfaceMirrorType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_traffic_direction(self): return self.traffic_direction
- def set_traffic_direction(self, traffic_direction): self.traffic_direction = traffic_direction
- def validate_TrafficDirectionType(self, value):
- # Validate type TrafficDirectionType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'ingress', u'egress', u'both'])
- else:
- error = value not in [u'ingress', u'egress', u'both']
- if error:
- raise ValueError("TrafficDirectionType must be one of [u'ingress', u'egress', u'both']")
- def get_mirror_to(self): return self.mirror_to
- def set_mirror_to(self, mirror_to): self.mirror_to = mirror_to
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.traffic_direction == other.traffic_direction and
- self.mirror_to == other.mirror_to)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_traffic_direction (obj.populate_string ("traffic_direction"))
- obj.set_mirror_to (MirrorActionType.populate ())
- return obj
- def export(self, outfile, level=1, namespace_='', name_='InterfaceMirrorType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='InterfaceMirrorType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='InterfaceMirrorType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='InterfaceMirrorType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.traffic_direction is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%straffic-direction>%s</%straffic-direction>%s' % (namespace_, self.gds_format_string(quote_xml(self.traffic_direction).encode(ExternalEncoding), input_name='traffic-direction'), namespace_, eol_))
- if self.mirror_to is not None:
- self.mirror_to.export(outfile, level, namespace_, name_='mirror-to', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.traffic_direction is not None or
- self.mirror_to is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='InterfaceMirrorType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.traffic_direction is not None:
- showIndent(outfile, level)
- outfile.write('traffic_direction=%s,\n' % quote_python(self.traffic_direction).encode(ExternalEncoding))
- if self.mirror_to is not None:
- showIndent(outfile, level)
- outfile.write('mirror_to=model_.MirrorActionType(\n')
- self.mirror_to.exportLiteral(outfile, level, name_='mirror_to')
- showIndent(outfile, level)
- outfile.write('),\n')
- def exportDict(self, name_='InterfaceMirrorType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'traffic-direction':
- traffic_direction_ = child_.text
- traffic_direction_ = self.gds_validate_string(traffic_direction_, node, 'traffic_direction')
- self.traffic_direction = traffic_direction_
- self.validate_TrafficDirectionType(self.traffic_direction) # validate type TrafficDirectionType
- elif nodeName_ == 'mirror-to':
- obj_ = MirrorActionType.factory()
- obj_.build(child_)
- self.set_mirror_to(obj_)
-# end class InterfaceMirrorType
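
validate_TrafficDirectionType is a plain enumeration check, so bad values surface as ValueError instead of being stored silently. A sketch of both paths; mirror_to is left unset here to avoid guessing MirrorActionType's keyword arguments, and 'sideways' is a deliberately invalid value.

    mirror = InterfaceMirrorType(traffic_direction='both')
    mirror.validate_TrafficDirectionType(mirror.get_traffic_direction())   # passes

    try:
        mirror.validate_TrafficDirectionType('sideways')
    except ValueError, err:        # Python 2 syntax, matching the module
        pass                       # rejected: not one of ingress/egress/both
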
-
-
-class VirtualMachineInterfacePropertiesType(GeneratedsSuper):
- """
- VirtualMachineInterfacePropertiesType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, service_interface_type=None, interface_mirror=None, local_preference=None, sub_interface_vlan_tag=None, **kwargs):
- self.service_interface_type = service_interface_type
- if isinstance(interface_mirror, dict):
- obj = InterfaceMirrorType(**interface_mirror)
- self.interface_mirror = obj
- else:
- self.interface_mirror = interface_mirror
- self.local_preference = local_preference
- self.sub_interface_vlan_tag = sub_interface_vlan_tag
- def factory(*args_, **kwargs_):
- if VirtualMachineInterfacePropertiesType.subclass:
- return VirtualMachineInterfacePropertiesType.subclass(*args_, **kwargs_)
- else:
- return VirtualMachineInterfacePropertiesType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_service_interface_type(self): return self.service_interface_type
- def set_service_interface_type(self, service_interface_type): self.service_interface_type = service_interface_type
- def validate_ServiceInterfaceType(self, value):
- # Validate type ServiceInterfaceType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'management', u'left', u'right', u'other'])
- else:
- error = value not in [u'management', u'left', u'right', u'other']
- if error:
- raise ValueError("ServiceInterfaceType must be one of [u'management', u'left', u'right', u'other']")
- def get_interface_mirror(self): return self.interface_mirror
- def set_interface_mirror(self, interface_mirror): self.interface_mirror = interface_mirror
- def get_local_preference(self): return self.local_preference
- def set_local_preference(self, local_preference): self.local_preference = local_preference
- def get_sub_interface_vlan_tag(self): return self.sub_interface_vlan_tag
- def set_sub_interface_vlan_tag(self, sub_interface_vlan_tag): self.sub_interface_vlan_tag = sub_interface_vlan_tag
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.service_interface_type == other.service_interface_type and
- self.interface_mirror == other.interface_mirror and
- self.local_preference == other.local_preference and
- self.sub_interface_vlan_tag == other.sub_interface_vlan_tag)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_service_interface_type (obj.populate_string ("service_interface_type"))
- obj.set_interface_mirror (InterfaceMirrorType.populate ())
- obj.set_local_preference (obj.populate_integer ("local_preference"))
- obj.set_sub_interface_vlan_tag (obj.populate_integer ("sub_interface_vlan_tag"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='VirtualMachineInterfacePropertiesType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='VirtualMachineInterfacePropertiesType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VirtualMachineInterfacePropertiesType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='VirtualMachineInterfacePropertiesType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.service_interface_type is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sservice-interface-type>%s</%sservice-interface-type>%s' % (namespace_, self.gds_format_string(quote_xml(self.service_interface_type).encode(ExternalEncoding), input_name='service-interface-type'), namespace_, eol_))
- if self.interface_mirror is not None:
- self.interface_mirror.export(outfile, level, namespace_, name_='interface-mirror', pretty_print=pretty_print)
- if self.local_preference is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%slocal-preference>%s</%slocal-preference>%s' % (namespace_, self.gds_format_integer(self.local_preference, input_name='local-preference'), namespace_, eol_))
- if self.sub_interface_vlan_tag is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%ssub-interface-vlan-tag>%s</%ssub-interface-vlan-tag>%s' % (namespace_, self.gds_format_integer(self.sub_interface_vlan_tag, input_name='sub-interface-vlan-tag'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.service_interface_type is not None or
- self.interface_mirror is not None or
- self.local_preference is not None or
- self.sub_interface_vlan_tag is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='VirtualMachineInterfacePropertiesType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.service_interface_type is not None:
- showIndent(outfile, level)
- outfile.write('service_interface_type=%s,\n' % quote_python(self.service_interface_type).encode(ExternalEncoding))
- if self.interface_mirror is not None:
- showIndent(outfile, level)
- outfile.write('interface_mirror=model_.InterfaceMirrorType(\n')
- self.interface_mirror.exportLiteral(outfile, level, name_='interface_mirror')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.local_preference is not None:
- showIndent(outfile, level)
- outfile.write('local_preference=%d,\n' % self.local_preference)
- if self.sub_interface_vlan_tag is not None:
- showIndent(outfile, level)
- outfile.write('sub_interface_vlan_tag=%d,\n' % self.sub_interface_vlan_tag)
- def exportDict(self, name_='VirtualMachineInterfacePropertiesType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'service-interface-type':
- service_interface_type_ = child_.text
- service_interface_type_ = self.gds_validate_string(service_interface_type_, node, 'service_interface_type')
- self.service_interface_type = service_interface_type_
- self.validate_ServiceInterfaceType(self.service_interface_type) # validate type ServiceInterfaceType
- elif nodeName_ == 'interface-mirror':
- obj_ = InterfaceMirrorType.factory()
- obj_.build(child_)
- self.set_interface_mirror(obj_)
- elif nodeName_ == 'local-preference':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'local_preference')
- self.local_preference = ival_
- elif nodeName_ == 'sub-interface-vlan-tag':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'sub_interface_vlan_tag')
- self.sub_interface_vlan_tag = ival_
-# end class VirtualMachineInterfacePropertiesType
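
build() populates an instance from an ElementTree-style node, converting and validating each child element as it goes. The sketch below assumes a namespace-free XML fragment (made up for illustration) and that Tag_pattern_ and the gds_validate_* helpers, defined elsewhere in the same removed module, behave as in standard generateDS output.

    from xml.etree import ElementTree

    xml_blob = ('<virtual-machine-interface-properties>'
                '<service-interface-type>left</service-interface-type>'
                '<local-preference>200</local-preference>'
                '</virtual-machine-interface-properties>')

    props = VirtualMachineInterfacePropertiesType()
    props.build(ElementTree.fromstring(xml_blob))

    assert props.get_service_interface_type() == 'left'   # enum-checked during build
    assert props.get_local_preference() == 200             # text converted to int
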
-
-
-class ServiceTemplateInterfaceType(GeneratedsSuper):
- """
- ServiceTemplateInterfaceType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, service_interface_type=None, shared_ip=False, static_route_enable=False, **kwargs):
- self.service_interface_type = service_interface_type
- self.shared_ip = shared_ip
- self.static_route_enable = static_route_enable
- def factory(*args_, **kwargs_):
- if ServiceTemplateInterfaceType.subclass:
- return ServiceTemplateInterfaceType.subclass(*args_, **kwargs_)
- else:
- return ServiceTemplateInterfaceType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_service_interface_type(self): return self.service_interface_type
- def set_service_interface_type(self, service_interface_type): self.service_interface_type = service_interface_type
- def validate_ServiceInterfaceType(self, value):
- # Validate type ServiceInterfaceType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'management', u'left', u'right', u'other'])
- else:
- error = value not in [u'management', u'left', u'right', u'other']
- if error:
- raise ValueError("ServiceInterfaceType must be one of [u'management', u'left', u'right', u'other']")
- def get_shared_ip(self): return self.shared_ip
- def set_shared_ip(self, shared_ip): self.shared_ip = shared_ip
- def get_static_route_enable(self): return self.static_route_enable
- def set_static_route_enable(self, static_route_enable): self.static_route_enable = static_route_enable
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.service_interface_type == other.service_interface_type and
- self.shared_ip == other.shared_ip and
- self.static_route_enable == other.static_route_enable)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_service_interface_type (obj.populate_string ("service_interface_type"))
- obj.set_shared_ip (obj.populate_boolean ("shared_ip"))
- obj.set_static_route_enable (obj.populate_boolean ("static_route_enable"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='ServiceTemplateInterfaceType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='ServiceTemplateInterfaceType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ServiceTemplateInterfaceType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='ServiceTemplateInterfaceType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.service_interface_type is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sservice-interface-type>%s</%sservice-interface-type>%s' % (namespace_, self.gds_format_string(quote_xml(self.service_interface_type).encode(ExternalEncoding), input_name='service-interface-type'), namespace_, eol_))
- if self.shared_ip is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sshared-ip>%s</%sshared-ip>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.shared_ip)), input_name='shared-ip'), namespace_, eol_))
- if self.static_route_enable is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sstatic-route-enable>%s</%sstatic-route-enable>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.static_route_enable)), input_name='static-route-enable'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.service_interface_type is not None or
- self.shared_ip is not None or
- self.static_route_enable is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='ServiceTemplateInterfaceType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.service_interface_type is not None:
- showIndent(outfile, level)
- outfile.write('service_interface_type=%s,\n' % quote_python(self.service_interface_type).encode(ExternalEncoding))
- if self.shared_ip is not None:
- showIndent(outfile, level)
- outfile.write('shared_ip=%s,\n' % self.shared_ip)
- if self.static_route_enable is not None:
- showIndent(outfile, level)
- outfile.write('static_route_enable=%s,\n' % self.static_route_enable)
- def exportDict(self, name_='ServiceTemplateInterfaceType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'service-interface-type':
- service_interface_type_ = child_.text
- service_interface_type_ = self.gds_validate_string(service_interface_type_, node, 'service_interface_type')
- self.service_interface_type = service_interface_type_
- self.validate_ServiceInterfaceType(self.service_interface_type) # validate type ServiceInterfaceType
- elif nodeName_ == 'shared-ip':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'shared_ip')
- self.shared_ip = ival_
- elif nodeName_ == 'static-route-enable':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'static_route_enable')
- self.static_route_enable = ival_
-# end class ServiceTemplateInterfaceType
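
export() writes the element to any file-like object, so capturing the XML in memory only needs a buffer; the sketch uses the Python 2 StringIO to match the module's vintage, assumes ExternalEncoding is defined in the removed module, and uses illustrative field values.

    from StringIO import StringIO

    iface = ServiceTemplateInterfaceType(service_interface_type='management',
                                         shared_ip=True,
                                         static_route_enable=False)

    buf = StringIO()
    iface.export(buf, level=0)
    # buf.getvalue() now holds the serialized <ServiceTemplateInterfaceType> element
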
-
-
-class ServiceInstanceInterfaceType(GeneratedsSuper):
- """
- ServiceInstanceInterfaceType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, virtual_network=None, ip_address=None, static_routes=None, **kwargs):
- self.virtual_network = virtual_network
- self.ip_address = ip_address
- if isinstance(static_routes, dict):
- obj = RouteTableType(**static_routes)
- self.static_routes = obj
- else:
- self.static_routes = static_routes
- def factory(*args_, **kwargs_):
- if ServiceInstanceInterfaceType.subclass:
- return ServiceInstanceInterfaceType.subclass(*args_, **kwargs_)
- else:
- return ServiceInstanceInterfaceType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_virtual_network(self): return self.virtual_network
- def set_virtual_network(self, virtual_network): self.virtual_network = virtual_network
- def get_ip_address(self): return self.ip_address
- def set_ip_address(self, ip_address): self.ip_address = ip_address
- def validate_IpAddressType(self, value):
- # Validate type IpAddressType, a restriction on xsd:string.
- pass
- def get_static_routes(self): return self.static_routes
- def set_static_routes(self, static_routes): self.static_routes = static_routes
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.virtual_network == other.virtual_network and
- self.ip_address == other.ip_address and
- self.static_routes == other.static_routes)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_virtual_network (obj.populate_string ("virtual_network"))
- obj.set_ip_address (obj.populate_string ("ip_address"))
- obj.set_static_routes (RouteTableType.populate ())
- return obj
- def export(self, outfile, level=1, namespace_='', name_='ServiceInstanceInterfaceType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='ServiceInstanceInterfaceType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ServiceInstanceInterfaceType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='ServiceInstanceInterfaceType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.virtual_network is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svirtual-network>%s</%svirtual-network>%s' % (namespace_, self.gds_format_string(quote_xml(self.virtual_network).encode(ExternalEncoding), input_name='virtual-network'), namespace_, eol_))
- if self.ip_address is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sip-address>%s</%sip-address>%s' % (namespace_, self.gds_format_string(quote_xml(self.ip_address).encode(ExternalEncoding), input_name='ip-address'), namespace_, eol_))
- if self.static_routes is not None:
- self.static_routes.export(outfile, level, namespace_, name_='static-routes', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.virtual_network is not None or
- self.ip_address is not None or
- self.static_routes is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='ServiceInstanceInterfaceType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.virtual_network is not None:
- showIndent(outfile, level)
- outfile.write('virtual_network=%s,\n' % quote_python(self.virtual_network).encode(ExternalEncoding))
- if self.ip_address is not None:
- showIndent(outfile, level)
- outfile.write('ip_address=%s,\n' % quote_python(self.ip_address).encode(ExternalEncoding))
- if self.static_routes is not None:
- showIndent(outfile, level)
- outfile.write('static_routes=model_.RouteTableType(\n')
- self.static_routes.exportLiteral(outfile, level, name_='static_routes')
- showIndent(outfile, level)
- outfile.write('),\n')
- def exportDict(self, name_='ServiceInstanceInterfaceType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'virtual-network':
- virtual_network_ = child_.text
- virtual_network_ = self.gds_validate_string(virtual_network_, node, 'virtual_network')
- self.virtual_network = virtual_network_
- elif nodeName_ == 'ip-address':
- ip_address_ = child_.text
- ip_address_ = self.gds_validate_string(ip_address_, node, 'ip_address')
- self.ip_address = ip_address_
- self.validate_IpAddressType(self.ip_address) # validate type IpAddressType
- elif nodeName_ == 'static-routes':
- obj_ = RouteTableType.factory()
- obj_.build(child_)
- self.set_static_routes(obj_)
-# end class ServiceInstanceInterfaceType
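
A short sketch of how the ServiceInstanceInterfaceType above is typically driven, using only the constructor, export() and exportDict() shown in this hunk; the virtual-network FQ name and IP address are made-up placeholders.

    import sys

    si_if = ServiceInstanceInterfaceType(
        virtual_network='default-domain:demo:left-vn',  # placeholder FQ name
        ip_address='10.0.0.3')                          # placeholder address

    # XML form: writes <virtual-network> and <ip-address> children per exportChildren().
    si_if.export(sys.stdout)

    # Dict form: {'ServiceInstanceInterfaceType': {...}} as built by exportDict().
    print si_if.exportDict()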
-
-
-class virtual_machine_interface_sub_interface(GeneratedsSuper):
- """
- virtual_machine_interface_sub_interface class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_machine_interface_sub_interface.subclass:
- return virtual_machine_interface_sub_interface.subclass(*args_, **kwargs_)
- else:
- return virtual_machine_interface_sub_interface(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-machine-interface-sub-interface', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-machine-interface-sub-interface')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-machine-interface-sub-interface'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-machine-interface-sub-interface', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-machine-interface-sub-interface'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-machine-interface-sub-interface'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_machine_interface_sub_interface
-
-
-class virtual_machine_virtual_machine_interface(GeneratedsSuper):
- """
- virtual_machine_virtual_machine_interface class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_machine_virtual_machine_interface.subclass:
- return virtual_machine_virtual_machine_interface.subclass(*args_, **kwargs_)
- else:
- return virtual_machine_virtual_machine_interface(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-machine-virtual-machine-interface', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-machine-virtual-machine-interface')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-machine-virtual-machine-interface'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-machine-virtual-machine-interface', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-machine-virtual-machine-interface'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-machine-virtual-machine-interface'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_machine_virtual_machine_interface
-
-
-class project_virtual_machine_interface(GeneratedsSuper):
- """
- project_virtual_machine_interface class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if project_virtual_machine_interface.subclass:
- return project_virtual_machine_interface.subclass(*args_, **kwargs_)
- else:
- return project_virtual_machine_interface(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='project-virtual-machine-interface', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='project-virtual-machine-interface')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='project-virtual-machine-interface'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='project-virtual-machine-interface', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='project-virtual-machine-interface'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='project-virtual-machine-interface'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class project_virtual_machine_interface
-
-
-class virtual_machine_interface_virtual_machine(GeneratedsSuper):
- """
- virtual_machine_interface_virtual_machine class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_machine_interface_virtual_machine.subclass:
- return virtual_machine_interface_virtual_machine.subclass(*args_, **kwargs_)
- else:
- return virtual_machine_interface_virtual_machine(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-machine-interface-virtual-machine', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-machine-interface-virtual-machine')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-machine-interface-virtual-machine'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-machine-interface-virtual-machine', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-machine-interface-virtual-machine'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-machine-interface-virtual-machine'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_machine_interface_virtual_machine
-
-
-class virtual_machine_interface_virtual_network(GeneratedsSuper):
- """
- virtual_machine_interface_virtual_network class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_machine_interface_virtual_network.subclass:
- return virtual_machine_interface_virtual_network.subclass(*args_, **kwargs_)
- else:
- return virtual_machine_interface_virtual_network(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-machine-interface-virtual-network', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-machine-interface-virtual-network')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-machine-interface-virtual-network'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-machine-interface-virtual-network', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-machine-interface-virtual-network'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-machine-interface-virtual-network'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_machine_interface_virtual_network
-
-
-class PolicyBasedForwardingRuleType(GeneratedsSuper):
- """
- PolicyBasedForwardingRuleType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, direction=None, vlan_tag=None, src_mac=None, dst_mac=None, mpls_label=None, service_chain_address=None, protocol=None, **kwargs):
- self.direction = direction
- self.vlan_tag = vlan_tag
- self.src_mac = src_mac
- self.dst_mac = dst_mac
- self.mpls_label = mpls_label
- self.service_chain_address = service_chain_address
- self.protocol = protocol
- def factory(*args_, **kwargs_):
- if PolicyBasedForwardingRuleType.subclass:
- return PolicyBasedForwardingRuleType.subclass(*args_, **kwargs_)
- else:
- return PolicyBasedForwardingRuleType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_direction(self): return self.direction
- def set_direction(self, direction): self.direction = direction
- def validate_TrafficDirectionType(self, value):
- # Validate type TrafficDirectionType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'ingress', u'egress', u'both'])
- else:
- error = value not in [u'ingress', u'egress', u'both']
- if error:
- raise ValueError("TrafficDirectionType must be one of [u'ingress', u'egress', u'both']")
- def get_vlan_tag(self): return self.vlan_tag
- def set_vlan_tag(self, vlan_tag): self.vlan_tag = vlan_tag
- def get_src_mac(self): return self.src_mac
- def set_src_mac(self, src_mac): self.src_mac = src_mac
- def get_dst_mac(self): return self.dst_mac
- def set_dst_mac(self, dst_mac): self.dst_mac = dst_mac
- def get_mpls_label(self): return self.mpls_label
- def set_mpls_label(self, mpls_label): self.mpls_label = mpls_label
- def get_service_chain_address(self): return self.service_chain_address
- def set_service_chain_address(self, service_chain_address): self.service_chain_address = service_chain_address
- def validate_IpAddress(self, value):
- # Validate type IpAddress, a restriction on xsd:string.
- pass
- def get_protocol(self): return self.protocol
- def set_protocol(self, protocol): self.protocol = protocol
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.direction == other.direction and
- self.vlan_tag == other.vlan_tag and
- self.src_mac == other.src_mac and
- self.dst_mac == other.dst_mac and
- self.mpls_label == other.mpls_label and
- self.service_chain_address == other.service_chain_address and
- self.protocol == other.protocol)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_direction (obj.populate_string ("direction"))
- obj.set_vlan_tag (obj.populate_integer ("vlan_tag"))
- obj.set_src_mac (obj.populate_string ("src_mac"))
- obj.set_dst_mac (obj.populate_string ("dst_mac"))
- obj.set_mpls_label (obj.populate_integer ("mpls_label"))
- obj.set_service_chain_address (obj.populate_string ("service_chain_address"))
- obj.set_protocol (obj.populate_string ("protocol"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='PolicyBasedForwardingRuleType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='PolicyBasedForwardingRuleType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='PolicyBasedForwardingRuleType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='PolicyBasedForwardingRuleType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.direction is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sdirection>%s</%sdirection>%s' % (namespace_, self.gds_format_string(quote_xml(self.direction).encode(ExternalEncoding), input_name='direction'), namespace_, eol_))
- if self.vlan_tag is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svlan-tag>%s</%svlan-tag>%s' % (namespace_, self.gds_format_integer(self.vlan_tag, input_name='vlan-tag'), namespace_, eol_))
- if self.src_mac is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%ssrc-mac>%s</%ssrc-mac>%s' % (namespace_, self.gds_format_string(quote_xml(self.src_mac).encode(ExternalEncoding), input_name='src-mac'), namespace_, eol_))
- if self.dst_mac is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sdst-mac>%s</%sdst-mac>%s' % (namespace_, self.gds_format_string(quote_xml(self.dst_mac).encode(ExternalEncoding), input_name='dst-mac'), namespace_, eol_))
- if self.mpls_label is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%smpls-label>%s</%smpls-label>%s' % (namespace_, self.gds_format_integer(self.mpls_label, input_name='mpls-label'), namespace_, eol_))
- if self.service_chain_address is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sservice-chain-address>%s</%sservice-chain-address>%s' % (namespace_, self.gds_format_string(quote_xml(self.service_chain_address).encode(ExternalEncoding), input_name='service-chain-address'), namespace_, eol_))
- if self.protocol is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sprotocol>%s</%sprotocol>%s' % (namespace_, self.gds_format_string(quote_xml(self.protocol).encode(ExternalEncoding), input_name='protocol'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.direction is not None or
- self.vlan_tag is not None or
- self.src_mac is not None or
- self.dst_mac is not None or
- self.mpls_label is not None or
- self.service_chain_address is not None or
- self.protocol is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='PolicyBasedForwardingRuleType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.direction is not None:
- showIndent(outfile, level)
- outfile.write('direction=%s,\n' % quote_python(self.direction).encode(ExternalEncoding))
- if self.vlan_tag is not None:
- showIndent(outfile, level)
- outfile.write('vlan_tag=%d,\n' % self.vlan_tag)
- if self.src_mac is not None:
- showIndent(outfile, level)
- outfile.write('src_mac=%s,\n' % quote_python(self.src_mac).encode(ExternalEncoding))
- if self.dst_mac is not None:
- showIndent(outfile, level)
- outfile.write('dst_mac=%s,\n' % quote_python(self.dst_mac).encode(ExternalEncoding))
- if self.mpls_label is not None:
- showIndent(outfile, level)
- outfile.write('mpls_label=%d,\n' % self.mpls_label)
- if self.service_chain_address is not None:
- showIndent(outfile, level)
- outfile.write('service_chain_address=%s,\n' % quote_python(self.service_chain_address).encode(ExternalEncoding))
- if self.protocol is not None:
- showIndent(outfile, level)
- outfile.write('protocol=%s,\n' % quote_python(self.protocol).encode(ExternalEncoding))
- def exportDict(self, name_='PolicyBasedForwardingRuleType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'direction':
- direction_ = child_.text
- direction_ = self.gds_validate_string(direction_, node, 'direction')
- self.direction = direction_
- self.validate_TrafficDirectionType(self.direction) # validate type TrafficDirectionType
- elif nodeName_ == 'vlan-tag':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'vlan_tag')
- self.vlan_tag = ival_
- elif nodeName_ == 'src-mac':
- src_mac_ = child_.text
- src_mac_ = self.gds_validate_string(src_mac_, node, 'src_mac')
- self.src_mac = src_mac_
- elif nodeName_ == 'dst-mac':
- dst_mac_ = child_.text
- dst_mac_ = self.gds_validate_string(dst_mac_, node, 'dst_mac')
- self.dst_mac = dst_mac_
- elif nodeName_ == 'mpls-label':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'mpls_label')
- self.mpls_label = ival_
- elif nodeName_ == 'service-chain-address':
- service_chain_address_ = child_.text
- service_chain_address_ = self.gds_validate_string(service_chain_address_, node, 'service_chain_address')
- self.service_chain_address = service_chain_address_
- self.validate_IpAddress(self.service_chain_address) # validate type IpAddress
- elif nodeName_ == 'protocol':
- protocol_ = child_.text
- protocol_ = self.gds_validate_string(protocol_, node, 'protocol')
- self.protocol = protocol_
-# end class PolicyBasedForwardingRuleType
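
A comparable sketch for PolicyBasedForwardingRuleType; the field values are invented, but the keyword names and the direction restriction ('ingress', 'egress', 'both') come directly from the generated __init__() and validate_TrafficDirectionType() above.

    import sys

    rule = PolicyBasedForwardingRuleType(
        direction='both',             # restricted to 'ingress', 'egress' or 'both'
        vlan_tag=100,
        src_mac='02:00:00:00:00:01',  # placeholder MAC addresses
        dst_mac='02:00:00:00:00:02',
        service_chain_address='10.1.1.5',
        protocol='any')

    # Raises ValueError for anything outside the TrafficDirectionType enum.
    rule.validate_TrafficDirectionType(rule.direction)

    # Emit the <direction>, <vlan-tag>, ... XML children defined in exportChildren().
    rule.export(sys.stdout)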
-
-
-class instance_ip_virtual_network(GeneratedsSuper):
- """
- instance_ip_virtual_network class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if instance_ip_virtual_network.subclass:
- return instance_ip_virtual_network.subclass(*args_, **kwargs_)
- else:
- return instance_ip_virtual_network(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='instance-ip-virtual-network', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='instance-ip-virtual-network')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='instance-ip-virtual-network'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='instance-ip-virtual-network', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='instance-ip-virtual-network'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='instance-ip-virtual-network'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class instance_ip_virtual_network
-
-
-class instance_ip_virtual_machine_interface(GeneratedsSuper):
- """
- instance_ip_virtual_machine_interface class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if instance_ip_virtual_machine_interface.subclass:
- return instance_ip_virtual_machine_interface.subclass(*args_, **kwargs_)
- else:
- return instance_ip_virtual_machine_interface(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='instance-ip-virtual-machine-interface', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='instance-ip-virtual-machine-interface')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='instance-ip-virtual-machine-interface'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='instance-ip-virtual-machine-interface', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='instance-ip-virtual-machine-interface'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='instance-ip-virtual-machine-interface'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class instance_ip_virtual_machine_interface
-
-
-class subnet_virtual_machine_interface(GeneratedsSuper):
- """
- subnet_virtual_machine_interface class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if subnet_virtual_machine_interface.subclass:
- return subnet_virtual_machine_interface.subclass(*args_, **kwargs_)
- else:
- return subnet_virtual_machine_interface(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='subnet-virtual-machine-interface', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='subnet-virtual-machine-interface')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='subnet-virtual-machine-interface'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='subnet-virtual-machine-interface', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='subnet-virtual-machine-interface'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='subnet-virtual-machine-interface'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class subnet_virtual_machine_interface
-
-
-class virtual_network_floating_ip_pool(GeneratedsSuper):
- """
- virtual_network_floating_ip_pool class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_network_floating_ip_pool.subclass:
- return virtual_network_floating_ip_pool.subclass(*args_, **kwargs_)
- else:
- return virtual_network_floating_ip_pool(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-network-floating-ip-pool', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-network-floating-ip-pool')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-network-floating-ip-pool'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-network-floating-ip-pool', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-network-floating-ip-pool'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-network-floating-ip-pool'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_network_floating_ip_pool
-
-
-class project_floating_ip_pool(GeneratedsSuper):
- """
- project_floating_ip_pool class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if project_floating_ip_pool.subclass:
- return project_floating_ip_pool.subclass(*args_, **kwargs_)
- else:
- return project_floating_ip_pool(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='project-floating-ip-pool', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='project-floating-ip-pool')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='project-floating-ip-pool'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='project-floating-ip-pool', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='project-floating-ip-pool'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='project-floating-ip-pool'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class project_floating_ip_pool
-
-
-class floating_ip_project(GeneratedsSuper):
- """
- floating_ip_project class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if floating_ip_project.subclass:
- return floating_ip_project.subclass(*args_, **kwargs_)
- else:
- return floating_ip_project(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='floating-ip-project', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='floating-ip-project')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='floating-ip-project'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='floating-ip-project', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='floating-ip-project'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='floating-ip-project'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class floating_ip_project
-
-
-class floating_ip_pool_floating_ip(GeneratedsSuper):
- """
- floating_ip_pool_floating_ip class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if floating_ip_pool_floating_ip.subclass:
- return floating_ip_pool_floating_ip.subclass(*args_, **kwargs_)
- else:
- return floating_ip_pool_floating_ip(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='floating-ip-pool-floating-ip', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='floating-ip-pool-floating-ip')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='floating-ip-pool-floating-ip'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='floating-ip-pool-floating-ip', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='floating-ip-pool-floating-ip'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='floating-ip-pool-floating-ip'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class floating_ip_pool_floating_ip
-
-
-class floating_ip_virtual_machine_interface(GeneratedsSuper):
- """
- floating_ip_virtual_machine_interface class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if floating_ip_virtual_machine_interface.subclass:
- return floating_ip_virtual_machine_interface.subclass(*args_, **kwargs_)
- else:
- return floating_ip_virtual_machine_interface(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='floating-ip-virtual-machine-interface', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='floating-ip-virtual-machine-interface')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='floating-ip-virtual-machine-interface'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='floating-ip-virtual-machine-interface', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='floating-ip-virtual-machine-interface'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='floating-ip-virtual-machine-interface'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class floating_ip_virtual_machine_interface
-
-
-class global_system_config_physical_router(GeneratedsSuper):
- """
- global_system_config_physical_router class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if global_system_config_physical_router.subclass:
- return global_system_config_physical_router.subclass(*args_, **kwargs_)
- else:
- return global_system_config_physical_router(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='global-system-config-physical-router', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='global-system-config-physical-router')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='global-system-config-physical-router'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='global-system-config-physical-router', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='global-system-config-physical-router'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='global-system-config-physical-router'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class global_system_config_physical_router
-
-
-class physical_router_virtual_router(GeneratedsSuper):
- """
- physical_router_virtual_router class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if physical_router_virtual_router.subclass:
- return physical_router_virtual_router.subclass(*args_, **kwargs_)
- else:
- return physical_router_virtual_router(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='physical-router-virtual-router', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='physical-router-virtual-router')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='physical-router-virtual-router'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='physical-router-virtual-router', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='physical-router-virtual-router'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='physical-router-virtual-router'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class physical_router_virtual_router
-
-
-class physical_router_bgp_router(GeneratedsSuper):
- """
- physical_router_bgp_router class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if physical_router_bgp_router.subclass:
- return physical_router_bgp_router.subclass(*args_, **kwargs_)
- else:
- return physical_router_bgp_router(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='physical-router-bgp-router', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='physical-router-bgp-router')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='physical-router-bgp-router'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='physical-router-bgp-router', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='physical-router-bgp-router'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='physical-router-bgp-router'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class physical_router_bgp_router
-
-
-class physical_router_virtual_network(GeneratedsSuper):
- """
- physical_router_virtual_network class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if physical_router_virtual_network.subclass:
- return physical_router_virtual_network.subclass(*args_, **kwargs_)
- else:
- return physical_router_virtual_network(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='physical-router-virtual-network', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='physical-router-virtual-network')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='physical-router-virtual-network'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='physical-router-virtual-network', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='physical-router-virtual-network'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='physical-router-virtual-network'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class physical_router_virtual_network
-
-
-class physical_router_physical_interface(GeneratedsSuper):
- """
- physical_router_physical_interface class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if physical_router_physical_interface.subclass:
- return physical_router_physical_interface.subclass(*args_, **kwargs_)
- else:
- return physical_router_physical_interface(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='physical-router-physical-interface', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='physical-router-physical-interface')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='physical-router-physical-interface'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='physical-router-physical-interface', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='physical-router-physical-interface'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='physical-router-physical-interface'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class physical_router_physical_interface
-
-
-class physical_router_logical_interface(GeneratedsSuper):
- """
- physical_router_logical_interface class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if physical_router_logical_interface.subclass:
- return physical_router_logical_interface.subclass(*args_, **kwargs_)
- else:
- return physical_router_logical_interface(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='physical-router-logical-interface', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='physical-router-logical-interface')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='physical-router-logical-interface'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='physical-router-logical-interface', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='physical-router-logical-interface'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='physical-router-logical-interface'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class physical_router_logical_interface
-
-
-class physical_interface_logical_interface(GeneratedsSuper):
- """
- physical_interface_logical_interface class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if physical_interface_logical_interface.subclass:
- return physical_interface_logical_interface.subclass(*args_, **kwargs_)
- else:
- return physical_interface_logical_interface(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='physical-interface-logical-interface', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='physical-interface-logical-interface')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='physical-interface-logical-interface'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='physical-interface-logical-interface', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='physical-interface-logical-interface'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='physical-interface-logical-interface'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class physical_interface_logical_interface
-
-
-class UserCredentials(GeneratedsSuper):
- """
- UserCredentials class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, username=None, password=None, **kwargs):
- self.username = username
- self.password = password
- def factory(*args_, **kwargs_):
- if UserCredentials.subclass:
- return UserCredentials.subclass(*args_, **kwargs_)
- else:
- return UserCredentials(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_username(self): return self.username
- def set_username(self, username): self.username = username
- def get_password(self): return self.password
- def set_password(self, password): self.password = password
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.username == other.username and
- self.password == other.password)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_username (obj.populate_string ("username"))
- obj.set_password (obj.populate_string ("password"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='UserCredentials', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='UserCredentials')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='UserCredentials'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='UserCredentials', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.username is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%susername>%s</%susername>%s' % (namespace_, self.gds_format_string(quote_xml(self.username).encode(ExternalEncoding), input_name='username'), namespace_, eol_))
- if self.password is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%spassword>%s</%spassword>%s' % (namespace_, self.gds_format_string(quote_xml(self.password).encode(ExternalEncoding), input_name='password'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.username is not None or
- self.password is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='UserCredentials'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.username is not None:
- showIndent(outfile, level)
- outfile.write('username=%s,\n' % quote_python(self.username).encode(ExternalEncoding))
- if self.password is not None:
- showIndent(outfile, level)
- outfile.write('password=%s,\n' % quote_python(self.password).encode(ExternalEncoding))
- def exportDict(self, name_='UserCredentials'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'username':
- username_ = child_.text
- username_ = self.gds_validate_string(username_, node, 'username')
- self.username = username_
- elif nodeName_ == 'password':
- password_ = child_.text
- password_ = self.gds_validate_string(password_, node, 'password')
- self.password = password_
-# end class UserCredentials
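A small round-trip sketch for the UserCredentials type deleted above: export() writes the <username> and <password> children and buildChildren() reads them back, so an exported object compares equal to a rebuilt one. The lxml parser is an assumption (the generated module ships its own parsing helpers); the class is passed in rather than imported to avoid guessing a module path:

import StringIO
from lxml import etree  # parser choice assumed, not taken from this diff

def roundtrip_user_credentials(UserCredentials):
    original = UserCredentials(username='admin', password='secret')
    buf = StringIO.StringIO()
    original.export(buf, level=0)                    # writes <UserCredentials>...</UserCredentials>
    rebuilt = UserCredentials()
    rebuilt.build(etree.fromstring(buf.getvalue()))  # buildChildren() refills both fields
    return original == rebuilt                       # __eq__ compares username and password -> True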
-
-
-class SNMPCredentials(GeneratedsSuper):
- """
- SNMPCredentials class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, version=2, local_port=None, retries=None, timeout=None, v2_community=None, v3_security_name=None, v3_security_level=None, v3_security_engine_id=None, v3_context=None, v3_context_engine_id=None, v3_authentication_protocol=None, v3_authentication_password=None, v3_privacy_protocol=None, v3_privacy_password=None, v3_engine_id=None, v3_engine_boots=None, v3_engine_time=None, **kwargs):
- self.version = version
- self.local_port = local_port
- self.retries = retries
- self.timeout = timeout
- self.v2_community = v2_community
- self.v3_security_name = v3_security_name
- self.v3_security_level = v3_security_level
- self.v3_security_engine_id = v3_security_engine_id
- self.v3_context = v3_context
- self.v3_context_engine_id = v3_context_engine_id
- self.v3_authentication_protocol = v3_authentication_protocol
- self.v3_authentication_password = v3_authentication_password
- self.v3_privacy_protocol = v3_privacy_protocol
- self.v3_privacy_password = v3_privacy_password
- self.v3_engine_id = v3_engine_id
- self.v3_engine_boots = v3_engine_boots
- self.v3_engine_time = v3_engine_time
- def factory(*args_, **kwargs_):
- if SNMPCredentials.subclass:
- return SNMPCredentials.subclass(*args_, **kwargs_)
- else:
- return SNMPCredentials(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_version(self): return self.version
- def set_version(self, version): self.version = version
- def get_local_port(self): return self.local_port
- def set_local_port(self, local_port): self.local_port = local_port
- def get_retries(self): return self.retries
- def set_retries(self, retries): self.retries = retries
- def get_timeout(self): return self.timeout
- def set_timeout(self, timeout): self.timeout = timeout
- def get_v2_community(self): return self.v2_community
- def set_v2_community(self, v2_community): self.v2_community = v2_community
- def get_v3_security_name(self): return self.v3_security_name
- def set_v3_security_name(self, v3_security_name): self.v3_security_name = v3_security_name
- def get_v3_security_level(self): return self.v3_security_level
- def set_v3_security_level(self, v3_security_level): self.v3_security_level = v3_security_level
- def get_v3_security_engine_id(self): return self.v3_security_engine_id
- def set_v3_security_engine_id(self, v3_security_engine_id): self.v3_security_engine_id = v3_security_engine_id
- def get_v3_context(self): return self.v3_context
- def set_v3_context(self, v3_context): self.v3_context = v3_context
- def get_v3_context_engine_id(self): return self.v3_context_engine_id
- def set_v3_context_engine_id(self, v3_context_engine_id): self.v3_context_engine_id = v3_context_engine_id
- def get_v3_authentication_protocol(self): return self.v3_authentication_protocol
- def set_v3_authentication_protocol(self, v3_authentication_protocol): self.v3_authentication_protocol = v3_authentication_protocol
- def get_v3_authentication_password(self): return self.v3_authentication_password
- def set_v3_authentication_password(self, v3_authentication_password): self.v3_authentication_password = v3_authentication_password
- def get_v3_privacy_protocol(self): return self.v3_privacy_protocol
- def set_v3_privacy_protocol(self, v3_privacy_protocol): self.v3_privacy_protocol = v3_privacy_protocol
- def get_v3_privacy_password(self): return self.v3_privacy_password
- def set_v3_privacy_password(self, v3_privacy_password): self.v3_privacy_password = v3_privacy_password
- def get_v3_engine_id(self): return self.v3_engine_id
- def set_v3_engine_id(self, v3_engine_id): self.v3_engine_id = v3_engine_id
- def get_v3_engine_boots(self): return self.v3_engine_boots
- def set_v3_engine_boots(self, v3_engine_boots): self.v3_engine_boots = v3_engine_boots
- def get_v3_engine_time(self): return self.v3_engine_time
- def set_v3_engine_time(self, v3_engine_time): self.v3_engine_time = v3_engine_time
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.version == other.version and
- self.local_port == other.local_port and
- self.retries == other.retries and
- self.timeout == other.timeout and
- self.v2_community == other.v2_community and
- self.v3_security_name == other.v3_security_name and
- self.v3_security_level == other.v3_security_level and
- self.v3_security_engine_id == other.v3_security_engine_id and
- self.v3_context == other.v3_context and
- self.v3_context_engine_id == other.v3_context_engine_id and
- self.v3_authentication_protocol == other.v3_authentication_protocol and
- self.v3_authentication_password == other.v3_authentication_password and
- self.v3_privacy_protocol == other.v3_privacy_protocol and
- self.v3_privacy_password == other.v3_privacy_password and
- self.v3_engine_id == other.v3_engine_id and
- self.v3_engine_boots == other.v3_engine_boots and
- self.v3_engine_time == other.v3_engine_time)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_version (obj.populate_integer ("version"))
- obj.set_local_port (obj.populate_integer ("local_port"))
- obj.set_retries (obj.populate_integer ("retries"))
- obj.set_timeout (obj.populate_integer ("timeout"))
- obj.set_v2_community (obj.populate_string ("v2_community"))
- obj.set_v3_security_name (obj.populate_string ("v3_security_name"))
- obj.set_v3_security_level (obj.populate_string ("v3_security_level"))
- obj.set_v3_security_engine_id (obj.populate_string ("v3_security_engine_id"))
- obj.set_v3_context (obj.populate_string ("v3_context"))
- obj.set_v3_context_engine_id (obj.populate_string ("v3_context_engine_id"))
- obj.set_v3_authentication_protocol (obj.populate_string ("v3_authentication_protocol"))
- obj.set_v3_authentication_password (obj.populate_string ("v3_authentication_password"))
- obj.set_v3_privacy_protocol (obj.populate_string ("v3_privacy_protocol"))
- obj.set_v3_privacy_password (obj.populate_string ("v3_privacy_password"))
- obj.set_v3_engine_id (obj.populate_string ("v3_engine_id"))
- obj.set_v3_engine_boots (obj.populate_integer ("v3_engine_boots"))
- obj.set_v3_engine_time (obj.populate_integer ("v3_engine_time"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='SNMPCredentials', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='SNMPCredentials')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SNMPCredentials'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='SNMPCredentials', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.version is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sversion>%s</%sversion>%s' % (namespace_, self.gds_format_integer(self.version, input_name='version'), namespace_, eol_))
- if self.local_port is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%slocal-port>%s</%slocal-port>%s' % (namespace_, self.gds_format_integer(self.local_port, input_name='local-port'), namespace_, eol_))
- if self.retries is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sretries>%s</%sretries>%s' % (namespace_, self.gds_format_integer(self.retries, input_name='retries'), namespace_, eol_))
- if self.timeout is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%stimeout>%s</%stimeout>%s' % (namespace_, self.gds_format_integer(self.timeout, input_name='timeout'), namespace_, eol_))
- if self.v2_community is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sv2-community>%s</%sv2-community>%s' % (namespace_, self.gds_format_string(quote_xml(self.v2_community).encode(ExternalEncoding), input_name='v2-community'), namespace_, eol_))
- if self.v3_security_name is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sv3-security-name>%s</%sv3-security-name>%s' % (namespace_, self.gds_format_string(quote_xml(self.v3_security_name).encode(ExternalEncoding), input_name='v3-security-name'), namespace_, eol_))
- if self.v3_security_level is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sv3-security-level>%s</%sv3-security-level>%s' % (namespace_, self.gds_format_string(quote_xml(self.v3_security_level).encode(ExternalEncoding), input_name='v3-security-level'), namespace_, eol_))
- if self.v3_security_engine_id is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sv3-security-engine-id>%s</%sv3-security-engine-id>%s' % (namespace_, self.gds_format_string(quote_xml(self.v3_security_engine_id).encode(ExternalEncoding), input_name='v3-security-engine-id'), namespace_, eol_))
- if self.v3_context is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sv3-context>%s</%sv3-context>%s' % (namespace_, self.gds_format_string(quote_xml(self.v3_context).encode(ExternalEncoding), input_name='v3-context'), namespace_, eol_))
- if self.v3_context_engine_id is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sv3-context-engine-id>%s</%sv3-context-engine-id>%s' % (namespace_, self.gds_format_string(quote_xml(self.v3_context_engine_id).encode(ExternalEncoding), input_name='v3-context-engine-id'), namespace_, eol_))
- if self.v3_authentication_protocol is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sv3-authentication-protocol>%s</%sv3-authentication-protocol>%s' % (namespace_, self.gds_format_string(quote_xml(self.v3_authentication_protocol).encode(ExternalEncoding), input_name='v3-authentication-protocol'), namespace_, eol_))
- if self.v3_authentication_password is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sv3-authentication-password>%s</%sv3-authentication-password>%s' % (namespace_, self.gds_format_string(quote_xml(self.v3_authentication_password).encode(ExternalEncoding), input_name='v3-authentication-password'), namespace_, eol_))
- if self.v3_privacy_protocol is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sv3-privacy-protocol>%s</%sv3-privacy-protocol>%s' % (namespace_, self.gds_format_string(quote_xml(self.v3_privacy_protocol).encode(ExternalEncoding), input_name='v3-privacy-protocol'), namespace_, eol_))
- if self.v3_privacy_password is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sv3-privacy-password>%s</%sv3-privacy-password>%s' % (namespace_, self.gds_format_string(quote_xml(self.v3_privacy_password).encode(ExternalEncoding), input_name='v3-privacy-password'), namespace_, eol_))
- if self.v3_engine_id is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sv3-engine-id>%s</%sv3-engine-id>%s' % (namespace_, self.gds_format_string(quote_xml(self.v3_engine_id).encode(ExternalEncoding), input_name='v3-engine-id'), namespace_, eol_))
- if self.v3_engine_boots is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sv3-engine-boots>%s</%sv3-engine-boots>%s' % (namespace_, self.gds_format_integer(self.v3_engine_boots, input_name='v3-engine-boots'), namespace_, eol_))
- if self.v3_engine_time is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sv3-engine-time>%s</%sv3-engine-time>%s' % (namespace_, self.gds_format_integer(self.v3_engine_time, input_name='v3-engine-time'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.version is not None or
- self.local_port is not None or
- self.retries is not None or
- self.timeout is not None or
- self.v2_community is not None or
- self.v3_security_name is not None or
- self.v3_security_level is not None or
- self.v3_security_engine_id is not None or
- self.v3_context is not None or
- self.v3_context_engine_id is not None or
- self.v3_authentication_protocol is not None or
- self.v3_authentication_password is not None or
- self.v3_privacy_protocol is not None or
- self.v3_privacy_password is not None or
- self.v3_engine_id is not None or
- self.v3_engine_boots is not None or
- self.v3_engine_time is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='SNMPCredentials'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.version is not None:
- showIndent(outfile, level)
- outfile.write('version=%d,\n' % self.version)
- if self.local_port is not None:
- showIndent(outfile, level)
- outfile.write('local_port=%d,\n' % self.local_port)
- if self.retries is not None:
- showIndent(outfile, level)
- outfile.write('retries=%d,\n' % self.retries)
- if self.timeout is not None:
- showIndent(outfile, level)
- outfile.write('timeout=%d,\n' % self.timeout)
- if self.v2_community is not None:
- showIndent(outfile, level)
- outfile.write('v2_community=%s,\n' % quote_python(self.v2_community).encode(ExternalEncoding))
- if self.v3_security_name is not None:
- showIndent(outfile, level)
- outfile.write('v3_security_name=%s,\n' % quote_python(self.v3_security_name).encode(ExternalEncoding))
- if self.v3_security_level is not None:
- showIndent(outfile, level)
- outfile.write('v3_security_level=%s,\n' % quote_python(self.v3_security_level).encode(ExternalEncoding))
- if self.v3_security_engine_id is not None:
- showIndent(outfile, level)
- outfile.write('v3_security_engine_id=%s,\n' % quote_python(self.v3_security_engine_id).encode(ExternalEncoding))
- if self.v3_context is not None:
- showIndent(outfile, level)
- outfile.write('v3_context=%s,\n' % quote_python(self.v3_context).encode(ExternalEncoding))
- if self.v3_context_engine_id is not None:
- showIndent(outfile, level)
- outfile.write('v3_context_engine_id=%s,\n' % quote_python(self.v3_context_engine_id).encode(ExternalEncoding))
- if self.v3_authentication_protocol is not None:
- showIndent(outfile, level)
- outfile.write('v3_authentication_protocol=%s,\n' % quote_python(self.v3_authentication_protocol).encode(ExternalEncoding))
- if self.v3_authentication_password is not None:
- showIndent(outfile, level)
- outfile.write('v3_authentication_password=%s,\n' % quote_python(self.v3_authentication_password).encode(ExternalEncoding))
- if self.v3_privacy_protocol is not None:
- showIndent(outfile, level)
- outfile.write('v3_privacy_protocol=%s,\n' % quote_python(self.v3_privacy_protocol).encode(ExternalEncoding))
- if self.v3_privacy_password is not None:
- showIndent(outfile, level)
- outfile.write('v3_privacy_password=%s,\n' % quote_python(self.v3_privacy_password).encode(ExternalEncoding))
- if self.v3_engine_id is not None:
- showIndent(outfile, level)
- outfile.write('v3_engine_id=%s,\n' % quote_python(self.v3_engine_id).encode(ExternalEncoding))
- if self.v3_engine_boots is not None:
- showIndent(outfile, level)
- outfile.write('v3_engine_boots=%d,\n' % self.v3_engine_boots)
- if self.v3_engine_time is not None:
- showIndent(outfile, level)
- outfile.write('v3_engine_time=%d,\n' % self.v3_engine_time)
- def exportDict(self, name_='SNMPCredentials'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'version':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'version')
- self.version = ival_
- elif nodeName_ == 'local-port':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'local_port')
- self.local_port = ival_
- elif nodeName_ == 'retries':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'retries')
- self.retries = ival_
- elif nodeName_ == 'timeout':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'timeout')
- self.timeout = ival_
- elif nodeName_ == 'v2-community':
- v2_community_ = child_.text
- v2_community_ = self.gds_validate_string(v2_community_, node, 'v2_community')
- self.v2_community = v2_community_
- elif nodeName_ == 'v3-security-name':
- v3_security_name_ = child_.text
- v3_security_name_ = self.gds_validate_string(v3_security_name_, node, 'v3_security_name')
- self.v3_security_name = v3_security_name_
- elif nodeName_ == 'v3-security-level':
- v3_security_level_ = child_.text
- v3_security_level_ = self.gds_validate_string(v3_security_level_, node, 'v3_security_level')
- self.v3_security_level = v3_security_level_
- elif nodeName_ == 'v3-security-engine-id':
- v3_security_engine_id_ = child_.text
- v3_security_engine_id_ = self.gds_validate_string(v3_security_engine_id_, node, 'v3_security_engine_id')
- self.v3_security_engine_id = v3_security_engine_id_
- elif nodeName_ == 'v3-context':
- v3_context_ = child_.text
- v3_context_ = self.gds_validate_string(v3_context_, node, 'v3_context')
- self.v3_context = v3_context_
- elif nodeName_ == 'v3-context-engine-id':
- v3_context_engine_id_ = child_.text
- v3_context_engine_id_ = self.gds_validate_string(v3_context_engine_id_, node, 'v3_context_engine_id')
- self.v3_context_engine_id = v3_context_engine_id_
- elif nodeName_ == 'v3-authentication-protocol':
- v3_authentication_protocol_ = child_.text
- v3_authentication_protocol_ = self.gds_validate_string(v3_authentication_protocol_, node, 'v3_authentication_protocol')
- self.v3_authentication_protocol = v3_authentication_protocol_
- elif nodeName_ == 'v3-authentication-password':
- v3_authentication_password_ = child_.text
- v3_authentication_password_ = self.gds_validate_string(v3_authentication_password_, node, 'v3_authentication_password')
- self.v3_authentication_password = v3_authentication_password_
- elif nodeName_ == 'v3-privacy-protocol':
- v3_privacy_protocol_ = child_.text
- v3_privacy_protocol_ = self.gds_validate_string(v3_privacy_protocol_, node, 'v3_privacy_protocol')
- self.v3_privacy_protocol = v3_privacy_protocol_
- elif nodeName_ == 'v3-privacy-password':
- v3_privacy_password_ = child_.text
- v3_privacy_password_ = self.gds_validate_string(v3_privacy_password_, node, 'v3_privacy_password')
- self.v3_privacy_password = v3_privacy_password_
- elif nodeName_ == 'v3-engine-id':
- v3_engine_id_ = child_.text
- v3_engine_id_ = self.gds_validate_string(v3_engine_id_, node, 'v3_engine_id')
- self.v3_engine_id = v3_engine_id_
- elif nodeName_ == 'v3-engine-boots':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'v3_engine_boots')
- self.v3_engine_boots = ival_
- elif nodeName_ == 'v3-engine-time':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'v3_engine_time')
- self.v3_engine_time = ival_
-# end class SNMPCredentials
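The SNMPCredentials parser above maps dashed XML element names to underscored Python attributes and coerces the numeric children through int(). A short sketch of that behaviour, with the same lxml and class-import assumptions as before:

from lxml import etree

def parse_snmp_credentials(SNMPCredentials):
    snmp_xml = ('<SNMPCredentials>'
                '<version>3</version>'
                '<v3-security-name>ops</v3-security-name>'
                '<timeout>5</timeout>'
                '</SNMPCredentials>')
    creds = SNMPCredentials()
    creds.build(etree.fromstring(snmp_xml))
    assert creds.get_version() == 3               # <version> text run through int()
    assert creds.get_v3_security_name() == 'ops'  # 'v3-security-name' -> v3_security_name
    assert creds.get_timeout() == 5
    return creds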
-
-
-class JunosServicePorts(GeneratedsSuper):
- """
- JunosServicePorts class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, service_port=None, **kwargs):
- if (service_port is None) or (service_port == []):
- self.service_port = []
- else:
- self.service_port = service_port
- def factory(*args_, **kwargs_):
- if JunosServicePorts.subclass:
- return JunosServicePorts.subclass(*args_, **kwargs_)
- else:
- return JunosServicePorts(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_service_port(self): return self.service_port
- def set_service_port(self, service_port): self.service_port = service_port
- def add_service_port(self, value): self.service_port.append(value)
- def insert_service_port(self, index, value): self.service_port[index] = value
- def delete_service_port(self, value): self.service_port.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.service_port == other.service_port)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_service_port ([obj.populate_string ("service_port")])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='JunosServicePorts', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='JunosServicePorts')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='JunosServicePorts'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='JunosServicePorts', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for service_port_ in self.service_port:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sservice-port>%s</%sservice-port>%s' % (namespace_, self.gds_format_string(quote_xml(service_port_).encode(ExternalEncoding), input_name='service-port'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.service_port
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='JunosServicePorts'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('service_port=[\n')
- level += 1
- for service_port_ in self.service_port:
- showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(service_port_).encode(ExternalEncoding))
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='JunosServicePorts'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'service-port':
- service_port_ = child_.text
- service_port_ = self.gds_validate_string(service_port_, node, 'service_port')
- self.service_port.append(service_port_)
-# end class JunosServicePorts
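JunosServicePorts is the only list-valued type in this part of the diff: repeated <service-port> elements accumulate into self.service_port, and export() writes one element per entry. A minimal sketch under the same import assumptions:

import StringIO

def export_service_ports(JunosServicePorts):
    ports = JunosServicePorts()
    ports.add_service_port('si-0/0/0')   # appends to the service_port list
    ports.add_service_port('si-1/0/0')
    buf = StringIO.StringIO()
    ports.export(buf, level=0)
    # Expected output, one <service-port> element per list entry:
    # <JunosServicePorts>
    #     <service-port>si-0/0/0</service-port>
    #     <service-port>si-1/0/0</service-port>
    # </JunosServicePorts>
    return buf.getvalue()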
-
-
-class logical_interface_virtual_machine_interface(GeneratedsSuper):
- """
- logical_interface_virtual_machine_interface class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if logical_interface_virtual_machine_interface.subclass:
- return logical_interface_virtual_machine_interface.subclass(*args_, **kwargs_)
- else:
- return logical_interface_virtual_machine_interface(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='logical-interface-virtual-machine-interface', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='logical-interface-virtual-machine-interface')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='logical-interface-virtual-machine-interface'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='logical-interface-virtual-machine-interface', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='logical-interface-virtual-machine-interface'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='logical-interface-virtual-machine-interface'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class logical_interface_virtual_machine_interface
-
-
-class global_system_config_virtual_router(GeneratedsSuper):
- """
- global_system_config_virtual_router class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if global_system_config_virtual_router.subclass:
- return global_system_config_virtual_router.subclass(*args_, **kwargs_)
- else:
- return global_system_config_virtual_router(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='global-system-config-virtual-router', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='global-system-config-virtual-router')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='global-system-config-virtual-router'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='global-system-config-virtual-router', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='global-system-config-virtual-router'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='global-system-config-virtual-router'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class global_system_config_virtual_router
-
-
-class virtual_router_bgp_router(GeneratedsSuper):
- """
- virtual_router_bgp_router class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_router_bgp_router.subclass:
- return virtual_router_bgp_router.subclass(*args_, **kwargs_)
- else:
- return virtual_router_bgp_router(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-router-bgp-router', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-router-bgp-router')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-router-bgp-router'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-router-bgp-router', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-router-bgp-router'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-router-bgp-router'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_router_bgp_router
-
-
-class virtual_router_virtual_machine(GeneratedsSuper):
- """
- virtual_router_virtual_machine class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_router_virtual_machine.subclass:
- return virtual_router_virtual_machine.subclass(*args_, **kwargs_)
- else:
- return virtual_router_virtual_machine(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-router-virtual-machine', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-router-virtual-machine')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-router-virtual-machine'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-router-virtual-machine', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-router-virtual-machine'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-router-virtual-machine'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_router_virtual_machine
-
-
-class virtual_network_routing_instance(GeneratedsSuper):
- """
- virtual_network_routing_instance class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_network_routing_instance.subclass:
- return virtual_network_routing_instance.subclass(*args_, **kwargs_)
- else:
- return virtual_network_routing_instance(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-network-routing-instance', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-network-routing-instance')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-network-routing-instance'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-network-routing-instance', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-network-routing-instance'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-network-routing-instance'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_network_routing_instance
-
-
-class customer_attachment_virtual_machine_interface(GeneratedsSuper):
- """
- customer_attachment_virtual_machine_interface class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if customer_attachment_virtual_machine_interface.subclass:
- return customer_attachment_virtual_machine_interface.subclass(*args_, **kwargs_)
- else:
- return customer_attachment_virtual_machine_interface(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='customer-attachment-virtual-machine-interface', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='customer-attachment-virtual-machine-interface')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='customer-attachment-virtual-machine-interface'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='customer-attachment-virtual-machine-interface', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='customer-attachment-virtual-machine-interface'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='customer-attachment-virtual-machine-interface'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class customer_attachment_virtual_machine_interface
-
-
-class customer_attachment_floating_ip(GeneratedsSuper):
- """
- customer_attachment_floating_ip class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if customer_attachment_floating_ip.subclass:
- return customer_attachment_floating_ip.subclass(*args_, **kwargs_)
- else:
- return customer_attachment_floating_ip(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='customer-attachment-floating-ip', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='customer-attachment-floating-ip')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='customer-attachment-floating-ip'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='customer-attachment-floating-ip', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='customer-attachment-floating-ip'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='customer-attachment-floating-ip'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class customer_attachment_floating_ip
-
-
-class provider_attachment_virtual_router(GeneratedsSuper):
- """
- provider_attachment_virtual_router class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if provider_attachment_virtual_router.subclass:
- return provider_attachment_virtual_router.subclass(*args_, **kwargs_)
- else:
- return provider_attachment_virtual_router(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='provider-attachment-virtual-router', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='provider-attachment-virtual-router')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='provider-attachment-virtual-router'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='provider-attachment-virtual-router', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='provider-attachment-virtual-router'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='provider-attachment-virtual-router'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class provider_attachment_virtual_router
-
-
-class ServiceScaleOutType(GeneratedsSuper):
- """
- ServiceScaleOutType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, max_instances=1, auto_scale=False, **kwargs):
- self.max_instances = max_instances
- self.auto_scale = auto_scale
- def factory(*args_, **kwargs_):
- if ServiceScaleOutType.subclass:
- return ServiceScaleOutType.subclass(*args_, **kwargs_)
- else:
- return ServiceScaleOutType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_max_instances(self): return self.max_instances
- def set_max_instances(self, max_instances): self.max_instances = max_instances
- def get_auto_scale(self): return self.auto_scale
- def set_auto_scale(self, auto_scale): self.auto_scale = auto_scale
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.max_instances == other.max_instances and
- self.auto_scale == other.auto_scale)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_max_instances (obj.populate_integer ("max_instances"))
- obj.set_auto_scale (obj.populate_boolean ("auto_scale"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='ServiceScaleOutType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='ServiceScaleOutType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ServiceScaleOutType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='ServiceScaleOutType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.max_instances is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%smax-instances>%s</%smax-instances>%s' % (namespace_, self.gds_format_integer(self.max_instances, input_name='max-instances'), namespace_, eol_))
- if self.auto_scale is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sauto-scale>%s</%sauto-scale>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.auto_scale)), input_name='auto-scale'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.max_instances is not None or
- self.auto_scale is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='ServiceScaleOutType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.max_instances is not None:
- showIndent(outfile, level)
- outfile.write('max_instances=%d,\n' % self.max_instances)
- if self.auto_scale is not None:
- showIndent(outfile, level)
- outfile.write('auto_scale=%s,\n' % self.auto_scale)
- def exportDict(self, name_='ServiceScaleOutType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'max-instances':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'max_instances')
- self.max_instances = ival_
- elif nodeName_ == 'auto-scale':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'auto_scale')
- self.auto_scale = ival_
-# end class ServiceScaleOutType
-
-
-class ServiceTemplateType(GeneratedsSuper):
- """
- ServiceTemplateType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, service_mode=None, service_type=None, image_name=None, service_scaling=False, interface_type=None, flavor=None, ordered_interfaces=False, service_virtualization_type=None, availability_zone_enable=False, vrouter_instance_type=None, instance_data=None, **kwargs):
- self.service_mode = service_mode
- self.service_type = service_type
- self.image_name = image_name
- self.service_scaling = service_scaling
- if (interface_type is None) or (interface_type == []):
- self.interface_type = []
- else:
- if isinstance(interface_type[0], dict):
- objs = [ServiceTemplateInterfaceType(**elem) for elem in interface_type]
- self.interface_type = objs
- else:
- self.interface_type = interface_type
- self.flavor = flavor
- self.ordered_interfaces = ordered_interfaces
- self.service_virtualization_type = service_virtualization_type
- self.availability_zone_enable = availability_zone_enable
- self.vrouter_instance_type = vrouter_instance_type
- self.instance_data = instance_data
- def factory(*args_, **kwargs_):
- if ServiceTemplateType.subclass:
- return ServiceTemplateType.subclass(*args_, **kwargs_)
- else:
- return ServiceTemplateType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_service_mode(self): return self.service_mode
- def set_service_mode(self, service_mode): self.service_mode = service_mode
- def validate_ServiceModeType(self, value):
- # Validate type ServiceModeType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'transparent', u'in-network', u'in-network-nat'])
- else:
- error = value not in [u'transparent', u'in-network', u'in-network-nat']
- if error:
- raise ValueError("ServiceModeType must be one of [u'transparent', u'in-network', u'in-network-nat']")
- def get_service_type(self): return self.service_type
- def set_service_type(self, service_type): self.service_type = service_type
- def validate_ServiceType(self, value):
- # Validate type ServiceType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'firewall', u'analyzer', u'source-nat', u'loadbalancer'])
- else:
- error = value not in [u'firewall', u'analyzer', u'source-nat', u'loadbalancer']
- if error:
- raise ValueError("ServiceType must be one of [u'firewall', u'analyzer', u'source-nat', u'loadbalancer']")
- def get_image_name(self): return self.image_name
- def set_image_name(self, image_name): self.image_name = image_name
- def get_service_scaling(self): return self.service_scaling
- def set_service_scaling(self, service_scaling): self.service_scaling = service_scaling
- def get_interface_type(self): return self.interface_type
- def set_interface_type(self, interface_type): self.interface_type = interface_type
- def add_interface_type(self, value): self.interface_type.append(value)
- def insert_interface_type(self, index, value): self.interface_type[index] = value
- def delete_interface_type(self, value): self.interface_type.remove(value)
- def get_flavor(self): return self.flavor
- def set_flavor(self, flavor): self.flavor = flavor
- def get_ordered_interfaces(self): return self.ordered_interfaces
- def set_ordered_interfaces(self, ordered_interfaces): self.ordered_interfaces = ordered_interfaces
- def get_service_virtualization_type(self): return self.service_virtualization_type
- def set_service_virtualization_type(self, service_virtualization_type): self.service_virtualization_type = service_virtualization_type
- def validate_ServiceVirtualizationType(self, value):
- # Validate type ServiceVirtualizationType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'virtual-machine', u'network-namespace', u'vrouter-instance'])
- else:
- error = value not in [u'virtual-machine', u'network-namespace', u'vrouter-instance']
- if error:
- raise ValueError("ServiceVirtualizationType must be one of [u'virtual-machine', u'network-namespace', u'vrouter-instance']")
- def get_availability_zone_enable(self): return self.availability_zone_enable
- def set_availability_zone_enable(self, availability_zone_enable): self.availability_zone_enable = availability_zone_enable
- def get_vrouter_instance_type(self): return self.vrouter_instance_type
- def set_vrouter_instance_type(self, vrouter_instance_type): self.vrouter_instance_type = vrouter_instance_type
- def validate_VRouterInstanceType(self, value):
- # Validate type VRouterInstanceType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'libvirt-qemu', u'docker'])
- else:
- error = value not in [u'libvirt-qemu', u'docker']
- if error:
- raise ValueError("VRouterInstanceType must be one of [u'libvirt-qemu', u'docker']")
- def get_instance_data(self): return self.instance_data
- def set_instance_data(self, instance_data): self.instance_data = instance_data
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.service_mode == other.service_mode and
- self.service_type == other.service_type and
- self.image_name == other.image_name and
- self.service_scaling == other.service_scaling and
- self.interface_type == other.interface_type and
- self.flavor == other.flavor and
- self.ordered_interfaces == other.ordered_interfaces and
- self.service_virtualization_type == other.service_virtualization_type and
- self.availability_zone_enable == other.availability_zone_enable and
- self.vrouter_instance_type == other.vrouter_instance_type and
- self.instance_data == other.instance_data)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_service_mode (obj.populate_string ("service_mode"))
- obj.set_service_type (obj.populate_string ("service_type"))
- obj.set_image_name (obj.populate_string ("image_name"))
- obj.set_service_scaling (obj.populate_boolean ("service_scaling"))
- obj.set_interface_type ([ServiceTemplateInterfaceType.populate ()])
- obj.set_flavor (obj.populate_string ("flavor"))
- obj.set_ordered_interfaces (obj.populate_boolean ("ordered_interfaces"))
- obj.set_service_virtualization_type (obj.populate_string ("service_virtualization_type"))
- obj.set_availability_zone_enable (obj.populate_boolean ("availability_zone_enable"))
- obj.set_vrouter_instance_type (obj.populate_string ("vrouter_instance_type"))
- obj.set_instance_data (obj.populate_string ("instance_data"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='ServiceTemplateType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='ServiceTemplateType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ServiceTemplateType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='ServiceTemplateType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.service_mode is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sservice-mode>%s</%sservice-mode>%s' % (namespace_, self.gds_format_string(quote_xml(self.service_mode).encode(ExternalEncoding), input_name='service-mode'), namespace_, eol_))
- if self.service_type is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sservice-type>%s</%sservice-type>%s' % (namespace_, self.gds_format_string(quote_xml(self.service_type).encode(ExternalEncoding), input_name='service-type'), namespace_, eol_))
- if self.image_name is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%simage-name>%s</%simage-name>%s' % (namespace_, self.gds_format_string(quote_xml(self.image_name).encode(ExternalEncoding), input_name='image-name'), namespace_, eol_))
- if self.service_scaling is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sservice-scaling>%s</%sservice-scaling>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.service_scaling)), input_name='service-scaling'), namespace_, eol_))
- for interface_type_ in self.interface_type:
- if isinstance(interface_type_, dict):
- interface_type_ = ServiceTemplateInterfaceType(**interface_type_)
- interface_type_.export(outfile, level, namespace_, name_='interface-type', pretty_print=pretty_print)
- if self.flavor is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sflavor>%s</%sflavor>%s' % (namespace_, self.gds_format_string(quote_xml(self.flavor).encode(ExternalEncoding), input_name='flavor'), namespace_, eol_))
- if self.ordered_interfaces is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sordered-interfaces>%s</%sordered-interfaces>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.ordered_interfaces)), input_name='ordered-interfaces'), namespace_, eol_))
- if self.service_virtualization_type is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sservice-virtualization-type>%s</%sservice-virtualization-type>%s' % (namespace_, self.gds_format_string(quote_xml(self.service_virtualization_type).encode(ExternalEncoding), input_name='service-virtualization-type'), namespace_, eol_))
- if self.availability_zone_enable is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%savailability-zone-enable>%s</%savailability-zone-enable>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.availability_zone_enable)), input_name='availability-zone-enable'), namespace_, eol_))
- if self.vrouter_instance_type is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svrouter-instance-type>%s</%svrouter-instance-type>%s' % (namespace_, self.gds_format_string(quote_xml(self.vrouter_instance_type).encode(ExternalEncoding), input_name='vrouter-instance-type'), namespace_, eol_))
- if self.instance_data is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sinstance-data>%s</%sinstance-data>%s' % (namespace_, self.gds_format_string(quote_xml(self.instance_data).encode(ExternalEncoding), input_name='instance-data'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.service_mode is not None or
- self.service_type is not None or
- self.image_name is not None or
- self.service_scaling is not None or
- self.interface_type or
- self.flavor is not None or
- self.ordered_interfaces is not None or
- self.service_virtualization_type is not None or
- self.availability_zone_enable is not None or
- self.vrouter_instance_type is not None or
- self.instance_data is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='ServiceTemplateType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.service_mode is not None:
- showIndent(outfile, level)
- outfile.write('service_mode=%s,\n' % quote_python(self.service_mode).encode(ExternalEncoding))
- if self.service_type is not None:
- showIndent(outfile, level)
- outfile.write('service_type=%s,\n' % quote_python(self.service_type).encode(ExternalEncoding))
- if self.image_name is not None:
- showIndent(outfile, level)
- outfile.write('image_name=%s,\n' % quote_python(self.image_name).encode(ExternalEncoding))
- if self.service_scaling is not None:
- showIndent(outfile, level)
- outfile.write('service_scaling=%s,\n' % self.service_scaling)
- showIndent(outfile, level)
- outfile.write('interface_type=[\n')
- level += 1
- for interface_type_ in self.interface_type:
- showIndent(outfile, level)
- outfile.write('model_.ServiceTemplateInterfaceType(\n')
- interface_type_.exportLiteral(outfile, level, name_='ServiceTemplateInterfaceType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- if self.flavor is not None:
- showIndent(outfile, level)
- outfile.write('flavor=%s,\n' % quote_python(self.flavor).encode(ExternalEncoding))
- if self.ordered_interfaces is not None:
- showIndent(outfile, level)
- outfile.write('ordered_interfaces=%s,\n' % self.ordered_interfaces)
- if self.service_virtualization_type is not None:
- showIndent(outfile, level)
- outfile.write('service_virtualization_type=%s,\n' % quote_python(self.service_virtualization_type).encode(ExternalEncoding))
- if self.availability_zone_enable is not None:
- showIndent(outfile, level)
- outfile.write('availability_zone_enable=%s,\n' % self.availability_zone_enable)
- if self.vrouter_instance_type is not None:
- showIndent(outfile, level)
- outfile.write('vrouter_instance_type=%s,\n' % quote_python(self.vrouter_instance_type).encode(ExternalEncoding))
- if self.instance_data is not None:
- showIndent(outfile, level)
- outfile.write('instance_data=%s,\n' % quote_python(self.instance_data).encode(ExternalEncoding))
- def exportDict(self, name_='ServiceTemplateType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'service-mode':
- service_mode_ = child_.text
- service_mode_ = self.gds_validate_string(service_mode_, node, 'service_mode')
- self.service_mode = service_mode_
- self.validate_ServiceModeType(self.service_mode) # validate type ServiceModeType
- elif nodeName_ == 'service-type':
- service_type_ = child_.text
- service_type_ = self.gds_validate_string(service_type_, node, 'service_type')
- self.service_type = service_type_
- self.validate_ServiceType(self.service_type) # validate type ServiceType
- elif nodeName_ == 'image-name':
- image_name_ = child_.text
- image_name_ = self.gds_validate_string(image_name_, node, 'image_name')
- self.image_name = image_name_
- elif nodeName_ == 'service-scaling':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'service_scaling')
- self.service_scaling = ival_
- elif nodeName_ == 'interface-type':
- obj_ = ServiceTemplateInterfaceType.factory()
- obj_.build(child_)
- self.interface_type.append(obj_)
- elif nodeName_ == 'flavor':
- flavor_ = child_.text
- flavor_ = self.gds_validate_string(flavor_, node, 'flavor')
- self.flavor = flavor_
- elif nodeName_ == 'ordered-interfaces':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'ordered_interfaces')
- self.ordered_interfaces = ival_
- elif nodeName_ == 'service-virtualization-type':
- service_virtualization_type_ = child_.text
- service_virtualization_type_ = self.gds_validate_string(service_virtualization_type_, node, 'service_virtualization_type')
- self.service_virtualization_type = service_virtualization_type_
- self.validate_ServiceVirtualizationType(self.service_virtualization_type) # validate type ServiceVirtualizationType
- elif nodeName_ == 'availability-zone-enable':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'availability_zone_enable')
- self.availability_zone_enable = ival_
- elif nodeName_ == 'vrouter-instance-type':
- vrouter_instance_type_ = child_.text
- vrouter_instance_type_ = self.gds_validate_string(vrouter_instance_type_, node, 'vrouter_instance_type')
- self.vrouter_instance_type = vrouter_instance_type_
- self.validate_VRouterInstanceType(self.vrouter_instance_type) # validate type VRouterInstanceType
- elif nodeName_ == 'instance-data':
- instance_data_ = child_.text
- instance_data_ = self.gds_validate_string(instance_data_, node, 'instance_data')
- self.instance_data = instance_data_
-# end class ServiceTemplateType
-
-
-class ServiceInstanceType(GeneratedsSuper):
- """
- ServiceInstanceType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, auto_policy=False, availability_zone=None, management_virtual_network=None, left_virtual_network=None, left_ip_address=None, right_virtual_network=None, right_ip_address=None, interface_list=None, scale_out=None, ha_mode=None, virtual_router_id=None, **kwargs):
- self.auto_policy = auto_policy
- self.availability_zone = availability_zone
- self.management_virtual_network = management_virtual_network
- self.left_virtual_network = left_virtual_network
- self.left_ip_address = left_ip_address
- self.right_virtual_network = right_virtual_network
- self.right_ip_address = right_ip_address
- if (interface_list is None) or (interface_list == []):
- self.interface_list = []
- else:
- if isinstance(interface_list[0], dict):
- objs = [ServiceInstanceInterfaceType(**elem) for elem in interface_list]
- self.interface_list = objs
- else:
- self.interface_list = interface_list
- if isinstance(scale_out, dict):
- obj = ServiceScaleOutType(**scale_out)
- self.scale_out = obj
- else:
- self.scale_out = scale_out
- self.ha_mode = ha_mode
- self.virtual_router_id = virtual_router_id
- def factory(*args_, **kwargs_):
- if ServiceInstanceType.subclass:
- return ServiceInstanceType.subclass(*args_, **kwargs_)
- else:
- return ServiceInstanceType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_auto_policy(self): return self.auto_policy
- def set_auto_policy(self, auto_policy): self.auto_policy = auto_policy
- def get_availability_zone(self): return self.availability_zone
- def set_availability_zone(self, availability_zone): self.availability_zone = availability_zone
- def get_management_virtual_network(self): return self.management_virtual_network
- def set_management_virtual_network(self, management_virtual_network): self.management_virtual_network = management_virtual_network
- def get_left_virtual_network(self): return self.left_virtual_network
- def set_left_virtual_network(self, left_virtual_network): self.left_virtual_network = left_virtual_network
- def get_left_ip_address(self): return self.left_ip_address
- def set_left_ip_address(self, left_ip_address): self.left_ip_address = left_ip_address
- def validate_IpAddressType(self, value):
- # Validate type IpAddressType, a restriction on xsd:string.
- pass
- def get_right_virtual_network(self): return self.right_virtual_network
- def set_right_virtual_network(self, right_virtual_network): self.right_virtual_network = right_virtual_network
- def get_right_ip_address(self): return self.right_ip_address
- def set_right_ip_address(self, right_ip_address): self.right_ip_address = right_ip_address
- def get_interface_list(self): return self.interface_list
- def set_interface_list(self, interface_list): self.interface_list = interface_list
- def add_interface_list(self, value): self.interface_list.append(value)
- def insert_interface_list(self, index, value): self.interface_list[index] = value
- def delete_interface_list(self, value): self.interface_list.remove(value)
- def get_scale_out(self): return self.scale_out
- def set_scale_out(self, scale_out): self.scale_out = scale_out
- def get_ha_mode(self): return self.ha_mode
- def set_ha_mode(self, ha_mode): self.ha_mode = ha_mode
- def validate_AddressMode(self, value):
- # Validate type AddressMode, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'active-active', u'active-standby'])
- else:
- error = value not in [u'active-active', u'active-standby']
- if error:
- raise ValueError("AddressMode must be one of [u'active-active', u'active-standby']")
- def get_virtual_router_id(self): return self.virtual_router_id
- def set_virtual_router_id(self, virtual_router_id): self.virtual_router_id = virtual_router_id
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.auto_policy == other.auto_policy and
- self.availability_zone == other.availability_zone and
- self.management_virtual_network == other.management_virtual_network and
- self.left_virtual_network == other.left_virtual_network and
- self.left_ip_address == other.left_ip_address and
- self.right_virtual_network == other.right_virtual_network and
- self.right_ip_address == other.right_ip_address and
- self.interface_list == other.interface_list and
- self.scale_out == other.scale_out and
- self.ha_mode == other.ha_mode and
- self.virtual_router_id == other.virtual_router_id)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_auto_policy (obj.populate_boolean ("auto_policy"))
- obj.set_availability_zone (obj.populate_string ("availability_zone"))
- obj.set_management_virtual_network (obj.populate_string ("management_virtual_network"))
- obj.set_left_virtual_network (obj.populate_string ("left_virtual_network"))
- obj.set_left_ip_address (obj.populate_string ("left_ip_address"))
- obj.set_right_virtual_network (obj.populate_string ("right_virtual_network"))
- obj.set_right_ip_address (obj.populate_string ("right_ip_address"))
- obj.set_interface_list ([ServiceInstanceInterfaceType.populate ()])
- obj.set_scale_out (ServiceScaleOutType.populate ())
- obj.set_ha_mode (obj.populate_string ("ha_mode"))
- obj.set_virtual_router_id (obj.populate_string ("virtual_router_id"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='ServiceInstanceType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='ServiceInstanceType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ServiceInstanceType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='ServiceInstanceType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.auto_policy is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sauto-policy>%s</%sauto-policy>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.auto_policy)), input_name='auto-policy'), namespace_, eol_))
- if self.availability_zone is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%savailability-zone>%s</%savailability-zone>%s' % (namespace_, self.gds_format_string(quote_xml(self.availability_zone).encode(ExternalEncoding), input_name='availability-zone'), namespace_, eol_))
- if self.management_virtual_network is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%smanagement-virtual-network>%s</%smanagement-virtual-network>%s' % (namespace_, self.gds_format_string(quote_xml(self.management_virtual_network).encode(ExternalEncoding), input_name='management-virtual-network'), namespace_, eol_))
- if self.left_virtual_network is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sleft-virtual-network>%s</%sleft-virtual-network>%s' % (namespace_, self.gds_format_string(quote_xml(self.left_virtual_network).encode(ExternalEncoding), input_name='left-virtual-network'), namespace_, eol_))
- if self.left_ip_address is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sleft-ip-address>%s</%sleft-ip-address>%s' % (namespace_, self.gds_format_string(quote_xml(self.left_ip_address).encode(ExternalEncoding), input_name='left-ip-address'), namespace_, eol_))
- if self.right_virtual_network is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sright-virtual-network>%s</%sright-virtual-network>%s' % (namespace_, self.gds_format_string(quote_xml(self.right_virtual_network).encode(ExternalEncoding), input_name='right-virtual-network'), namespace_, eol_))
- if self.right_ip_address is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sright-ip-address>%s</%sright-ip-address>%s' % (namespace_, self.gds_format_string(quote_xml(self.right_ip_address).encode(ExternalEncoding), input_name='right-ip-address'), namespace_, eol_))
- for interface_list_ in self.interface_list:
- if isinstance(interface_list_, dict):
- interface_list_ = ServiceInstanceInterfaceType(**interface_list_)
- interface_list_.export(outfile, level, namespace_, name_='interface-list', pretty_print=pretty_print)
- if self.scale_out is not None:
- self.scale_out.export(outfile, level, namespace_, name_='scale-out', pretty_print=pretty_print)
- if self.ha_mode is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sha-mode>%s</%sha-mode>%s' % (namespace_, self.gds_format_string(quote_xml(self.ha_mode).encode(ExternalEncoding), input_name='ha-mode'), namespace_, eol_))
- if self.virtual_router_id is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svirtual-router-id>%s</%svirtual-router-id>%s' % (namespace_, self.gds_format_string(quote_xml(self.virtual_router_id).encode(ExternalEncoding), input_name='virtual-router-id'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.auto_policy is not None or
- self.availability_zone is not None or
- self.management_virtual_network is not None or
- self.left_virtual_network is not None or
- self.left_ip_address is not None or
- self.right_virtual_network is not None or
- self.right_ip_address is not None or
- self.interface_list or
- self.scale_out is not None or
- self.ha_mode is not None or
- self.virtual_router_id is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='ServiceInstanceType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.auto_policy is not None:
- showIndent(outfile, level)
- outfile.write('auto_policy=%s,\n' % self.auto_policy)
- if self.availability_zone is not None:
- showIndent(outfile, level)
- outfile.write('availability_zone=%s,\n' % quote_python(self.availability_zone).encode(ExternalEncoding))
- if self.management_virtual_network is not None:
- showIndent(outfile, level)
- outfile.write('management_virtual_network=%s,\n' % quote_python(self.management_virtual_network).encode(ExternalEncoding))
- if self.left_virtual_network is not None:
- showIndent(outfile, level)
- outfile.write('left_virtual_network=%s,\n' % quote_python(self.left_virtual_network).encode(ExternalEncoding))
- if self.left_ip_address is not None:
- showIndent(outfile, level)
- outfile.write('left_ip_address=%s,\n' % quote_python(self.left_ip_address).encode(ExternalEncoding))
- if self.right_virtual_network is not None:
- showIndent(outfile, level)
- outfile.write('right_virtual_network=%s,\n' % quote_python(self.right_virtual_network).encode(ExternalEncoding))
- if self.right_ip_address is not None:
- showIndent(outfile, level)
- outfile.write('right_ip_address=%s,\n' % quote_python(self.right_ip_address).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('interface_list=[\n')
- level += 1
- for interface_list_ in self.interface_list:
- showIndent(outfile, level)
- outfile.write('model_.ServiceInstanceInterfaceType(\n')
- interface_list_.exportLiteral(outfile, level, name_='ServiceInstanceInterfaceType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- if self.scale_out is not None:
- showIndent(outfile, level)
- outfile.write('scale_out=model_.ServiceScaleOutType(\n')
- self.scale_out.exportLiteral(outfile, level, name_='scale_out')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.ha_mode is not None:
- showIndent(outfile, level)
- outfile.write('ha_mode=%s,\n' % quote_python(self.ha_mode).encode(ExternalEncoding))
- if self.virtual_router_id is not None:
- showIndent(outfile, level)
- outfile.write('virtual_router_id=%s,\n' % quote_python(self.virtual_router_id).encode(ExternalEncoding))
- def exportDict(self, name_='ServiceInstanceType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'auto-policy':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'auto_policy')
- self.auto_policy = ival_
- elif nodeName_ == 'availability-zone':
- availability_zone_ = child_.text
- availability_zone_ = self.gds_validate_string(availability_zone_, node, 'availability_zone')
- self.availability_zone = availability_zone_
- elif nodeName_ == 'management-virtual-network':
- management_virtual_network_ = child_.text
- management_virtual_network_ = self.gds_validate_string(management_virtual_network_, node, 'management_virtual_network')
- self.management_virtual_network = management_virtual_network_
- elif nodeName_ == 'left-virtual-network':
- left_virtual_network_ = child_.text
- left_virtual_network_ = self.gds_validate_string(left_virtual_network_, node, 'left_virtual_network')
- self.left_virtual_network = left_virtual_network_
- elif nodeName_ == 'left-ip-address':
- left_ip_address_ = child_.text
- left_ip_address_ = self.gds_validate_string(left_ip_address_, node, 'left_ip_address')
- self.left_ip_address = left_ip_address_
- self.validate_IpAddressType(self.left_ip_address) # validate type IpAddressType
- elif nodeName_ == 'right-virtual-network':
- right_virtual_network_ = child_.text
- right_virtual_network_ = self.gds_validate_string(right_virtual_network_, node, 'right_virtual_network')
- self.right_virtual_network = right_virtual_network_
- elif nodeName_ == 'right-ip-address':
- right_ip_address_ = child_.text
- right_ip_address_ = self.gds_validate_string(right_ip_address_, node, 'right_ip_address')
- self.right_ip_address = right_ip_address_
- self.validate_IpAddressType(self.right_ip_address) # validate type IpAddressType
- elif nodeName_ == 'interface-list':
- obj_ = ServiceInstanceInterfaceType.factory()
- obj_.build(child_)
- self.interface_list.append(obj_)
- elif nodeName_ == 'scale-out':
- obj_ = ServiceScaleOutType.factory()
- obj_.build(child_)
- self.set_scale_out(obj_)
- elif nodeName_ == 'ha-mode':
- ha_mode_ = child_.text
- ha_mode_ = self.gds_validate_string(ha_mode_, node, 'ha_mode')
- self.ha_mode = ha_mode_
- self.validate_AddressMode(self.ha_mode) # validate type AddressMode
- elif nodeName_ == 'virtual-router-id':
- virtual_router_id_ = child_.text
- virtual_router_id_ = self.gds_validate_string(virtual_router_id_, node, 'virtual_router_id')
- self.virtual_router_id = virtual_router_id_
-# end class ServiceInstanceType
-
-
-class project_service_instance(GeneratedsSuper):
- """
- project_service_instance class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if project_service_instance.subclass:
- return project_service_instance.subclass(*args_, **kwargs_)
- else:
- return project_service_instance(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='project-service-instance', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='project-service-instance')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='project-service-instance'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='project-service-instance', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='project-service-instance'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='project-service-instance'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class project_service_instance
-
-
-class domain_service_template(GeneratedsSuper):
- """
- domain_service_template class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if domain_service_template.subclass:
- return domain_service_template.subclass(*args_, **kwargs_)
- else:
- return domain_service_template(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='domain-service-template', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='domain-service-template')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='domain-service-template'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='domain-service-template', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='domain-service-template'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='domain-service-template'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class domain_service_template
-
-
-class service_instance_service_template(GeneratedsSuper):
- """
- service_instance_service_template class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if service_instance_service_template.subclass:
- return service_instance_service_template.subclass(*args_, **kwargs_)
- else:
- return service_instance_service_template(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='service-instance-service-template', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='service-instance-service-template')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='service-instance-service-template'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='service-instance-service-template', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='service-instance-service-template'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='service-instance-service-template'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class service_instance_service_template
-
-
-class virtual_machine_service_instance(GeneratedsSuper):
- """
- virtual_machine_service_instance class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_machine_service_instance.subclass:
- return virtual_machine_service_instance.subclass(*args_, **kwargs_)
- else:
- return virtual_machine_service_instance(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-machine-service-instance', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-machine-service-instance')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-machine-service-instance'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-machine-service-instance', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-machine-service-instance'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-machine-service-instance'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_machine_service_instance
-
-
-class domain_virtual_DNS(GeneratedsSuper):
- """
- domain_virtual_DNS class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if domain_virtual_DNS.subclass:
- return domain_virtual_DNS.subclass(*args_, **kwargs_)
- else:
- return domain_virtual_DNS(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='domain-virtual-DNS', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='domain-virtual-DNS')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='domain-virtual-DNS'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='domain-virtual-DNS', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='domain-virtual-DNS'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='domain-virtual-DNS'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class domain_virtual_DNS
-
-
-class virtual_DNS_virtual_DNS_record(GeneratedsSuper):
- """
- virtual_DNS_virtual_DNS_record class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_DNS_virtual_DNS_record.subclass:
- return virtual_DNS_virtual_DNS_record.subclass(*args_, **kwargs_)
- else:
- return virtual_DNS_virtual_DNS_record(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-DNS-virtual-DNS-record', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-DNS-virtual-DNS-record')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-DNS-virtual-DNS-record'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-DNS-virtual-DNS-record', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-DNS-virtual-DNS-record'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-DNS-virtual-DNS-record'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_DNS_virtual_DNS_record
-
-
-class network_ipam_virtual_DNS(GeneratedsSuper):
- """
- network_ipam_virtual_DNS class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if network_ipam_virtual_DNS.subclass:
- return network_ipam_virtual_DNS.subclass(*args_, **kwargs_)
- else:
- return network_ipam_virtual_DNS(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='network-ipam-virtual-DNS', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='network-ipam-virtual-DNS')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='network-ipam-virtual-DNS'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='network-ipam-virtual-DNS', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='network-ipam-virtual-DNS'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='network-ipam-virtual-DNS'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class network_ipam_virtual_DNS
-
-
-class RouteType(GeneratedsSuper):
- """
- RouteType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, prefix=None, next_hop=None, next_hop_type=None, **kwargs):
- self.prefix = prefix
- self.next_hop = next_hop
- self.next_hop_type = next_hop_type
- def factory(*args_, **kwargs_):
- if RouteType.subclass:
- return RouteType.subclass(*args_, **kwargs_)
- else:
- return RouteType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_prefix(self): return self.prefix
- def set_prefix(self, prefix): self.prefix = prefix
- def get_next_hop(self): return self.next_hop
- def set_next_hop(self, next_hop): self.next_hop = next_hop
- def get_next_hop_type(self): return self.next_hop_type
- def set_next_hop_type(self, next_hop_type): self.next_hop_type = next_hop_type
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.prefix == other.prefix and
- self.next_hop == other.next_hop and
- self.next_hop_type == other.next_hop_type)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_prefix (obj.populate_string ("prefix"))
- obj.set_next_hop (obj.populate_string ("next_hop"))
- obj.set_next_hop_type (obj.populate_string ("next_hop_type"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='RouteType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='RouteType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RouteType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='RouteType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.prefix is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sprefix>%s</%sprefix>%s' % (namespace_, self.gds_format_string(quote_xml(self.prefix).encode(ExternalEncoding), input_name='prefix'), namespace_, eol_))
- if self.next_hop is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%snext-hop>%s</%snext-hop>%s' % (namespace_, self.gds_format_string(quote_xml(self.next_hop).encode(ExternalEncoding), input_name='next-hop'), namespace_, eol_))
- if self.next_hop_type is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%snext-hop-type>%s</%snext-hop-type>%s' % (namespace_, self.gds_format_string(quote_xml(self.next_hop_type).encode(ExternalEncoding), input_name='next-hop-type'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.prefix is not None or
- self.next_hop is not None or
- self.next_hop_type is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='RouteType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.prefix is not None:
- showIndent(outfile, level)
- outfile.write('prefix=%s,\n' % quote_python(self.prefix).encode(ExternalEncoding))
- if self.next_hop is not None:
- showIndent(outfile, level)
- outfile.write('next_hop=%s,\n' % quote_python(self.next_hop).encode(ExternalEncoding))
- if self.next_hop_type is not None:
- showIndent(outfile, level)
- outfile.write('next_hop_type=%s,\n' % quote_python(self.next_hop_type).encode(ExternalEncoding))
- def exportDict(self, name_='RouteType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'prefix':
- prefix_ = child_.text
- prefix_ = self.gds_validate_string(prefix_, node, 'prefix')
- self.prefix = prefix_
- elif nodeName_ == 'next-hop':
- next_hop_ = child_.text
- next_hop_ = self.gds_validate_string(next_hop_, node, 'next_hop')
- self.next_hop = next_hop_
- elif nodeName_ == 'next-hop-type':
- next_hop_type_ = child_.text
- next_hop_type_ = self.gds_validate_string(next_hop_type_, node, 'next_hop_type')
- self.next_hop_type = next_hop_type_
-# end class RouteType
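
A minimal sketch of how the RouteType class above is typically driven, in Python 2 to match the generated code; the prefix and next-hop values are illustrative only:

    route = RouteType(prefix='10.1.1.0/24',
                      next_hop='192.168.0.1',
                      next_hop_type='ip-address')
    # exportDict() serialises the object through json.dumps()/json.loads()
    # using __dict__, yielding a plain nested dict keyed by the element name.
    print route.exportDict()
    # -> {'RouteType': {'prefix': '10.1.1.0/24',
    #                   'next_hop': '192.168.0.1',
    #                   'next_hop_type': 'ip-address'}}
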
-
-
-class RouteTableType(GeneratedsSuper):
- """
- RouteTableType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, route=None, **kwargs):
- if (route is None) or (route == []):
- self.route = []
- else:
- if isinstance(route[0], dict):
- objs = [RouteType(**elem) for elem in route]
- self.route = objs
- else:
- self.route = route
- def factory(*args_, **kwargs_):
- if RouteTableType.subclass:
- return RouteTableType.subclass(*args_, **kwargs_)
- else:
- return RouteTableType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_route(self): return self.route
- def set_route(self, route): self.route = route
- def add_route(self, value): self.route.append(value)
- def insert_route(self, index, value): self.route[index] = value
- def delete_route(self, value): self.route.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.route == other.route)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_route ([RouteType.populate ()])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='RouteTableType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='RouteTableType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RouteTableType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='RouteTableType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for route_ in self.route:
- if isinstance(route_, dict):
- route_ = RouteType(**route_)
- route_.export(outfile, level, namespace_, name_='route', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.route
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='RouteTableType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('route=[\n')
- level += 1
- for route_ in self.route:
- showIndent(outfile, level)
- outfile.write('model_.RouteType(\n')
- route_.exportLiteral(outfile, level, name_='RouteType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='RouteTableType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'route':
- obj_ = RouteType.factory()
- obj_.build(child_)
- self.route.append(obj_)
-# end class RouteTableType
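
RouteTableType accepts either RouteType instances or plain dicts in its route list; dicts are coerced to RouteType in __init__ and again defensively in exportChildren(). A minimal sketch under that reading, with hypothetical values:

    table = RouteTableType(route=[{'prefix': '0.0.0.0/0',
                                   'next_hop': '10.0.0.1',
                                   'next_hop_type': 'ip-address'}])
    # The dict above was converted to a RouteType instance by __init__.
    assert isinstance(table.get_route()[0], RouteType)
    assert table.hasContent_()        # non-empty route list
    table.add_route(RouteType(prefix='10.2.0.0/16', next_hop='10.0.0.2'))
    print table.exportDict(name_='route-table')
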
-
-
-class project_route_table(GeneratedsSuper):
- """
- project_route_table class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if project_route_table.subclass:
- return project_route_table.subclass(*args_, **kwargs_)
- else:
- return project_route_table(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='project-route-table', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='project-route-table')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='project-route-table'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='project-route-table', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='project-route-table'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='project-route-table'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class project_route_table
-
-
-class virtual_network_route_table(GeneratedsSuper):
- """
- virtual_network_route_table class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_network_route_table.subclass:
- return virtual_network_route_table.subclass(*args_, **kwargs_)
- else:
- return virtual_network_route_table(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-network-route-table', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-network-route-table')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-network-route-table'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-network-route-table', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-network-route-table'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-network-route-table'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_network_route_table
-
-
-class project_interface_route_table(GeneratedsSuper):
- """
- project_interface_route_table class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if project_interface_route_table.subclass:
- return project_interface_route_table.subclass(*args_, **kwargs_)
- else:
- return project_interface_route_table(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='project-interface-route-table', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='project-interface-route-table')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='project-interface-route-table'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='project-interface-route-table', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='project-interface-route-table'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='project-interface-route-table'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class project_interface_route_table
-
-
-class virtual_machine_interface_route_table(GeneratedsSuper):
- """
- virtual_machine_interface_route_table class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_machine_interface_route_table.subclass:
- return virtual_machine_interface_route_table.subclass(*args_, **kwargs_)
- else:
- return virtual_machine_interface_route_table(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-machine-interface-route-table', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-machine-interface-route-table')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-machine-interface-route-table'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-machine-interface-route-table', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-machine-interface-route-table'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-machine-interface-route-table'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_machine_interface_route_table
-
-
-class project_logical_router(GeneratedsSuper):
- """
- project_logical_router class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if project_logical_router.subclass:
- return project_logical_router.subclass(*args_, **kwargs_)
- else:
- return project_logical_router(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='project-logical-router', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='project-logical-router')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='project-logical-router'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='project-logical-router', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='project-logical-router'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='project-logical-router'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class project_logical_router
-
-
-class logical_router_interface(GeneratedsSuper):
- """
- logical_router_interface class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if logical_router_interface.subclass:
- return logical_router_interface.subclass(*args_, **kwargs_)
- else:
- return logical_router_interface(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='logical-router-interface', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='logical-router-interface')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='logical-router-interface'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='logical-router-interface', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='logical-router-interface'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='logical-router-interface'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class logical_router_interface
-
-
-class logical_router_target(GeneratedsSuper):
- """
- logical_router_target class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if logical_router_target.subclass:
- return logical_router_target.subclass(*args_, **kwargs_)
- else:
- return logical_router_target(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='logical-router-target', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='logical-router-target')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='logical-router-target'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='logical-router-target', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='logical-router-target'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='logical-router-target'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class logical_router_target
-
-
-class logical_router_gateway(GeneratedsSuper):
- """
- logical_router_gateway class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if logical_router_gateway.subclass:
- return logical_router_gateway.subclass(*args_, **kwargs_)
- else:
- return logical_router_gateway(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='logical-router-gateway', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='logical-router-gateway')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='logical-router-gateway'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='logical-router-gateway', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='logical-router-gateway'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='logical-router-gateway'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class logical_router_gateway
-
-
-class logical_router_service_instance(GeneratedsSuper):
- """
- logical_router_service_instance class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if logical_router_service_instance.subclass:
- return logical_router_service_instance.subclass(*args_, **kwargs_)
- else:
- return logical_router_service_instance(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='logical-router-service-instance', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='logical-router-service-instance')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='logical-router-service-instance'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='logical-router-service-instance', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='logical-router-service-instance'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='logical-router-service-instance'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class logical_router_service_instance
-
-
-class global_system_config_config_node(GeneratedsSuper):
- """
- global_system_config_config_node class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if global_system_config_config_node.subclass:
- return global_system_config_config_node.subclass(*args_, **kwargs_)
- else:
- return global_system_config_config_node(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='global-system-config-config-node', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='global-system-config-config-node')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='global-system-config-config-node'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='global-system-config-config-node', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='global-system-config-config-node'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='global-system-config-config-node'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class global_system_config_config_node
-
-
-class global_system_config_analytics_node(GeneratedsSuper):
- """
- global_system_config_analytics_node class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if global_system_config_analytics_node.subclass:
- return global_system_config_analytics_node.subclass(*args_, **kwargs_)
- else:
- return global_system_config_analytics_node(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='global-system-config-analytics-node', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='global-system-config-analytics-node')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='global-system-config-analytics-node'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='global-system-config-analytics-node', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='global-system-config-analytics-node'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='global-system-config-analytics-node'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class global_system_config_analytics_node
-
-
-class global_system_config_database_node(GeneratedsSuper):
- """
- global_system_config_database_node class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if global_system_config_database_node.subclass:
- return global_system_config_database_node.subclass(*args_, **kwargs_)
- else:
- return global_system_config_database_node(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='global-system-config-database-node', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='global-system-config-database-node')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='global-system-config-database-node'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='global-system-config-database-node', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='global-system-config-database-node'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='global-system-config-database-node'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class global_system_config_database_node
-
-
-class KeyValuePair(GeneratedsSuper):
- """
- KeyValuePair class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, key=None, value=None, **kwargs):
- self.key = key
- self.value = value
- def factory(*args_, **kwargs_):
- if KeyValuePair.subclass:
- return KeyValuePair.subclass(*args_, **kwargs_)
- else:
- return KeyValuePair(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_key(self): return self.key
- def set_key(self, key): self.key = key
- def get_value(self): return self.value
- def set_value(self, value): self.value = value
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.key == other.key and
- self.value == other.value)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_key (obj.populate_string ("key"))
- obj.set_value (obj.populate_string ("value"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='KeyValuePair', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='KeyValuePair')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='KeyValuePair'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='KeyValuePair', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.key is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%skey>%s</%skey>%s' % (namespace_, self.gds_format_string(quote_xml(self.key).encode(ExternalEncoding), input_name='key'), namespace_, eol_))
- if self.value is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svalue>%s</%svalue>%s' % (namespace_, self.gds_format_string(quote_xml(self.value).encode(ExternalEncoding), input_name='value'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.key is not None or
- self.value is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='KeyValuePair'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.key is not None:
- showIndent(outfile, level)
- outfile.write('key=%s,\n' % quote_python(self.key).encode(ExternalEncoding))
- if self.value is not None:
- showIndent(outfile, level)
- outfile.write('value=%s,\n' % quote_python(self.value).encode(ExternalEncoding))
- def exportDict(self, name_='KeyValuePair'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'key':
- key_ = child_.text
- key_ = self.gds_validate_string(key_, node, 'key')
- self.key = key_
- elif nodeName_ == 'value':
- value_ = child_.text
- value_ = self.gds_validate_string(value_, node, 'value')
- self.value = value_
-# end class KeyValuePair
-
-
-class KeyValuePairs(GeneratedsSuper):
- """
- KeyValuePairs class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, key_value_pair=None, **kwargs):
- if (key_value_pair is None) or (key_value_pair == []):
- self.key_value_pair = []
- else:
- if isinstance(key_value_pair[0], dict):
- objs = [KeyValuePair(**elem) for elem in key_value_pair]
- self.key_value_pair = objs
- else:
- self.key_value_pair = key_value_pair
- def factory(*args_, **kwargs_):
- if KeyValuePairs.subclass:
- return KeyValuePairs.subclass(*args_, **kwargs_)
- else:
- return KeyValuePairs(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_key_value_pair(self): return self.key_value_pair
- def set_key_value_pair(self, key_value_pair): self.key_value_pair = key_value_pair
- def add_key_value_pair(self, value): self.key_value_pair.append(value)
- def insert_key_value_pair(self, index, value): self.key_value_pair[index] = value
- def delete_key_value_pair(self, value): self.key_value_pair.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.key_value_pair == other.key_value_pair)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_key_value_pair ([KeyValuePair.populate ()])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='KeyValuePairs', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='KeyValuePairs')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='KeyValuePairs'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='KeyValuePairs', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for key_value_pair_ in self.key_value_pair:
- if isinstance(key_value_pair_, dict):
- key_value_pair_ = KeyValuePair(**key_value_pair_)
- key_value_pair_.export(outfile, level, namespace_, name_='key-value-pair', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.key_value_pair
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='KeyValuePairs'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('key_value_pair=[\n')
- level += 1
- for key_value_pair_ in self.key_value_pair:
- showIndent(outfile, level)
- outfile.write('model_.KeyValuePair(\n')
- key_value_pair_.exportLiteral(outfile, level, name_='KeyValuePair')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='KeyValuePairs'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'key-value-pair':
- obj_ = KeyValuePair.factory()
- obj_.build(child_)
- self.key_value_pair.append(obj_)
-# end class KeyValuePairs
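
KeyValuePairs is the schema's generic tag container; buildChildren() creates one KeyValuePair per <key-value-pair> element. A minimal parsing sketch, assuming the Tag_pattern_ regex and the GeneratedsSuper string helpers defined earlier in this generated module are in scope (they are not shown in this hunk); Python 2:

    from xml.etree import ElementTree as ET

    xml = ('<key-value-pairs>'
           '<key-value-pair><key>owner</key><value>admin</value></key-value-pair>'
           '</key-value-pairs>')
    kvps = KeyValuePairs.factory()
    kvps.build(ET.fromstring(xml))
    pair = kvps.get_key_value_pair()[0]
    print pair.get_key(), pair.get_value()   # -> owner admin
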
-
-
-class global_system_config_service_appliance_set(GeneratedsSuper):
- """
- global_system_config_service_appliance_set class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if global_system_config_service_appliance_set.subclass:
- return global_system_config_service_appliance_set.subclass(*args_, **kwargs_)
- else:
- return global_system_config_service_appliance_set(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='global-system-config-service-appliance-set', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='global-system-config-service-appliance-set')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='global-system-config-service-appliance-set'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='global-system-config-service-appliance-set', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='global-system-config-service-appliance-set'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='global-system-config-service-appliance-set'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class global_system_config_service_appliance_set
-
-
-class service_appliance_set_service_appliance(GeneratedsSuper):
- """
- service_appliance_set_service_appliance class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if service_appliance_set_service_appliance.subclass:
- return service_appliance_set_service_appliance.subclass(*args_, **kwargs_)
- else:
- return service_appliance_set_service_appliance(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='service-appliance-set-service-appliance', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='service-appliance-set-service-appliance')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='service-appliance-set-service-appliance'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='service-appliance-set-service-appliance', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='service-appliance-set-service-appliance'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='service-appliance-set-service-appliance'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class service_appliance_set_service_appliance
-
-
-class project_loadbalancer_pool(GeneratedsSuper):
- """
- project_loadbalancer_pool class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if project_loadbalancer_pool.subclass:
- return project_loadbalancer_pool.subclass(*args_, **kwargs_)
- else:
- return project_loadbalancer_pool(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='project-loadbalancer-pool', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='project-loadbalancer-pool')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='project-loadbalancer-pool'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='project-loadbalancer-pool', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='project-loadbalancer-pool'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='project-loadbalancer-pool'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class project_loadbalancer_pool
-
-
-class loadbalancer_pool_service_instance(GeneratedsSuper):
- """
- loadbalancer_pool_service_instance class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if loadbalancer_pool_service_instance.subclass:
- return loadbalancer_pool_service_instance.subclass(*args_, **kwargs_)
- else:
- return loadbalancer_pool_service_instance(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='loadbalancer-pool-service-instance', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='loadbalancer-pool-service-instance')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='loadbalancer-pool-service-instance'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='loadbalancer-pool-service-instance', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='loadbalancer-pool-service-instance'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='loadbalancer-pool-service-instance'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class loadbalancer_pool_service_instance
-
-
-class loadbalancer_pool_virtual_machine_interface(GeneratedsSuper):
- """
- loadbalancer_pool_virtual_machine_interface class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if loadbalancer_pool_virtual_machine_interface.subclass:
- return loadbalancer_pool_virtual_machine_interface.subclass(*args_, **kwargs_)
- else:
- return loadbalancer_pool_virtual_machine_interface(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='loadbalancer-pool-virtual-machine-interface', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='loadbalancer-pool-virtual-machine-interface')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='loadbalancer-pool-virtual-machine-interface'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='loadbalancer-pool-virtual-machine-interface', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='loadbalancer-pool-virtual-machine-interface'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='loadbalancer-pool-virtual-machine-interface'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class loadbalancer_pool_virtual_machine_interface
-
-
-class LoadbalancerPoolType(GeneratedsSuper):
- """
- LoadbalancerPoolType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, status=None, status_description=None, admin_state=True, protocol=None, loadbalancer_method=None, subnet_id=None, **kwargs):
- self.status = status
- self.status_description = status_description
- self.admin_state = admin_state
- self.protocol = protocol
- self.loadbalancer_method = loadbalancer_method
- self.subnet_id = subnet_id
- def factory(*args_, **kwargs_):
- if LoadbalancerPoolType.subclass:
- return LoadbalancerPoolType.subclass(*args_, **kwargs_)
- else:
- return LoadbalancerPoolType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_status(self): return self.status
- def set_status(self, status): self.status = status
- def get_status_description(self): return self.status_description
- def set_status_description(self, status_description): self.status_description = status_description
- def get_admin_state(self): return self.admin_state
- def set_admin_state(self, admin_state): self.admin_state = admin_state
- def get_protocol(self): return self.protocol
- def set_protocol(self, protocol): self.protocol = protocol
- def validate_LoadbalancerProtocolType(self, value):
- # Validate type LoadbalancerProtocolType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'HTTP', u'HTTPS', u'TCP'])
- else:
- error = value not in [u'HTTP', u'HTTPS', u'TCP']
- if error:
- raise ValueError("LoadbalancerProtocolType must be one of [u'HTTP', u'HTTPS', u'TCP']")
- def get_loadbalancer_method(self): return self.loadbalancer_method
- def set_loadbalancer_method(self, loadbalancer_method): self.loadbalancer_method = loadbalancer_method
- def validate_LoadbalancerMethodType(self, value):
- # Validate type LoadbalancerMethodType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'ROUND_ROBIN', u'LEAST_CONNECTIONS', u'SOURCE_IP'])
- else:
- error = value not in [u'ROUND_ROBIN', u'LEAST_CONNECTIONS', u'SOURCE_IP']
- if error:
- raise ValueError("LoadbalancerMethodType must be one of [u'ROUND_ROBIN', u'LEAST_CONNECTIONS', u'SOURCE_IP']")
- def get_subnet_id(self): return self.subnet_id
- def set_subnet_id(self, subnet_id): self.subnet_id = subnet_id
- def validate_UuidStringType(self, value):
- # Validate type UuidStringType, a restriction on xsd:string.
- pass
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.status == other.status and
- self.status_description == other.status_description and
- self.admin_state == other.admin_state and
- self.protocol == other.protocol and
- self.loadbalancer_method == other.loadbalancer_method and
- self.subnet_id == other.subnet_id)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_status (obj.populate_string ("status"))
- obj.set_status_description (obj.populate_string ("status_description"))
- obj.set_admin_state (obj.populate_boolean ("admin_state"))
- obj.set_protocol (obj.populate_string ("protocol"))
- obj.set_loadbalancer_method (obj.populate_string ("loadbalancer_method"))
- obj.set_subnet_id (obj.populate_string ("subnet_id"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='LoadbalancerPoolType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='LoadbalancerPoolType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='LoadbalancerPoolType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='LoadbalancerPoolType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.status is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sstatus>%s</%sstatus>%s' % (namespace_, self.gds_format_string(quote_xml(self.status).encode(ExternalEncoding), input_name='status'), namespace_, eol_))
- if self.status_description is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sstatus-description>%s</%sstatus-description>%s' % (namespace_, self.gds_format_string(quote_xml(self.status_description).encode(ExternalEncoding), input_name='status-description'), namespace_, eol_))
- if self.admin_state is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sadmin-state>%s</%sadmin-state>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.admin_state)), input_name='admin-state'), namespace_, eol_))
- if self.protocol is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sprotocol>%s</%sprotocol>%s' % (namespace_, self.gds_format_string(quote_xml(self.protocol).encode(ExternalEncoding), input_name='protocol'), namespace_, eol_))
- if self.loadbalancer_method is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sloadbalancer-method>%s</%sloadbalancer-method>%s' % (namespace_, self.gds_format_string(quote_xml(self.loadbalancer_method).encode(ExternalEncoding), input_name='loadbalancer-method'), namespace_, eol_))
- if self.subnet_id is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%ssubnet-id>%s</%ssubnet-id>%s' % (namespace_, self.gds_format_string(quote_xml(self.subnet_id).encode(ExternalEncoding), input_name='subnet-id'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.status is not None or
- self.status_description is not None or
- self.admin_state is not None or
- self.protocol is not None or
- self.loadbalancer_method is not None or
- self.subnet_id is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='LoadbalancerPoolType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.status is not None:
- showIndent(outfile, level)
- outfile.write('status=%s,\n' % quote_python(self.status).encode(ExternalEncoding))
- if self.status_description is not None:
- showIndent(outfile, level)
- outfile.write('status_description=%s,\n' % quote_python(self.status_description).encode(ExternalEncoding))
- if self.admin_state is not None:
- showIndent(outfile, level)
- outfile.write('admin_state=%s,\n' % self.admin_state)
- if self.protocol is not None:
- showIndent(outfile, level)
- outfile.write('protocol=%s,\n' % quote_python(self.protocol).encode(ExternalEncoding))
- if self.loadbalancer_method is not None:
- showIndent(outfile, level)
- outfile.write('loadbalancer_method=%s,\n' % quote_python(self.loadbalancer_method).encode(ExternalEncoding))
- if self.subnet_id is not None:
- showIndent(outfile, level)
- outfile.write('subnet_id=%s,\n' % quote_python(self.subnet_id).encode(ExternalEncoding))
- def exportDict(self, name_='LoadbalancerPoolType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'status':
- status_ = child_.text
- status_ = self.gds_validate_string(status_, node, 'status')
- self.status = status_
- elif nodeName_ == 'status-description':
- status_description_ = child_.text
- status_description_ = self.gds_validate_string(status_description_, node, 'status_description')
- self.status_description = status_description_
- elif nodeName_ == 'admin-state':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'admin_state')
- self.admin_state = ival_
- elif nodeName_ == 'protocol':
- protocol_ = child_.text
- protocol_ = self.gds_validate_string(protocol_, node, 'protocol')
- self.protocol = protocol_
- self.validate_LoadbalancerProtocolType(self.protocol) # validate type LoadbalancerProtocolType
- elif nodeName_ == 'loadbalancer-method':
- loadbalancer_method_ = child_.text
- loadbalancer_method_ = self.gds_validate_string(loadbalancer_method_, node, 'loadbalancer_method')
- self.loadbalancer_method = loadbalancer_method_
- self.validate_LoadbalancerMethodType(self.loadbalancer_method) # validate type LoadbalancerMethodType
- elif nodeName_ == 'subnet-id':
- subnet_id_ = child_.text
- subnet_id_ = self.gds_validate_string(subnet_id_, node, 'subnet_id')
- self.subnet_id = subnet_id_
- self.validate_UuidStringType(self.subnet_id) # validate type UuidStringType
-# end class LoadbalancerPoolType
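
For illustration only (this is not part of the removed file): the generated LoadbalancerPoolType above exposes typed getters/setters, enum validators and a JSON-style exportDict(). A minimal usage sketch follows, assuming the rest of the generated module (GeneratedsSuper and its json import) is available on the path; nothing here is taken from the commit itself.

    # Hypothetical usage sketch; assumes the full generated module context.
    pool = LoadbalancerPoolType(protocol='HTTP',
                                loadbalancer_method='ROUND_ROBIN',
                                admin_state=True)
    # The enum validators raise ValueError for values outside the xsd restriction.
    pool.validate_LoadbalancerProtocolType(pool.get_protocol())   # 'HTTP' is allowed
    # pool.validate_LoadbalancerProtocolType('SCTP')              # would raise ValueError
    pool_dict = pool.exportDict()   # {'LoadbalancerPoolType': {'protocol': 'HTTP', ...}}
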
-
-
-class loadbalancer_pool_service_appliance_set(GeneratedsSuper):
- """
- loadbalancer_pool_service_appliance_set class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if loadbalancer_pool_service_appliance_set.subclass:
- return loadbalancer_pool_service_appliance_set.subclass(*args_, **kwargs_)
- else:
- return loadbalancer_pool_service_appliance_set(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='loadbalancer-pool-service-appliance-set', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='loadbalancer-pool-service-appliance-set')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='loadbalancer-pool-service-appliance-set'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='loadbalancer-pool-service-appliance-set', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='loadbalancer-pool-service-appliance-set'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='loadbalancer-pool-service-appliance-set'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class loadbalancer_pool_service_appliance_set
-
-
-class loadbalancer_pool_loadbalancer_member(GeneratedsSuper):
- """
- loadbalancer_pool_loadbalancer_member class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if loadbalancer_pool_loadbalancer_member.subclass:
- return loadbalancer_pool_loadbalancer_member.subclass(*args_, **kwargs_)
- else:
- return loadbalancer_pool_loadbalancer_member(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='loadbalancer-pool-loadbalancer-member', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='loadbalancer-pool-loadbalancer-member')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='loadbalancer-pool-loadbalancer-member'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='loadbalancer-pool-loadbalancer-member', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='loadbalancer-pool-loadbalancer-member'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='loadbalancer-pool-loadbalancer-member'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class loadbalancer_pool_loadbalancer_member
-
-
-class LoadbalancerMemberType(GeneratedsSuper):
- """
- LoadbalancerMemberType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, admin_state=True, status=None, status_description=None, protocol_port=None, weight=None, address=None, **kwargs):
- self.admin_state = admin_state
- self.status = status
- self.status_description = status_description
- self.protocol_port = protocol_port
- self.weight = weight
- self.address = address
- def factory(*args_, **kwargs_):
- if LoadbalancerMemberType.subclass:
- return LoadbalancerMemberType.subclass(*args_, **kwargs_)
- else:
- return LoadbalancerMemberType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_admin_state(self): return self.admin_state
- def set_admin_state(self, admin_state): self.admin_state = admin_state
- def get_status(self): return self.status
- def set_status(self, status): self.status = status
- def get_status_description(self): return self.status_description
- def set_status_description(self, status_description): self.status_description = status_description
- def get_protocol_port(self): return self.protocol_port
- def set_protocol_port(self, protocol_port): self.protocol_port = protocol_port
- def get_weight(self): return self.weight
- def set_weight(self, weight): self.weight = weight
- def get_address(self): return self.address
- def set_address(self, address): self.address = address
- def validate_IpAddressType(self, value):
- # Validate type IpAddressType, a restriction on xsd:string.
- pass
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.admin_state == other.admin_state and
- self.status == other.status and
- self.status_description == other.status_description and
- self.protocol_port == other.protocol_port and
- self.weight == other.weight and
- self.address == other.address)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_admin_state (obj.populate_boolean ("admin_state"))
- obj.set_status (obj.populate_string ("status"))
- obj.set_status_description (obj.populate_string ("status_description"))
- obj.set_protocol_port (obj.populate_integer ("protocol_port"))
- obj.set_weight (obj.populate_integer ("weight"))
- obj.set_address (obj.populate_string ("address"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='LoadbalancerMemberType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='LoadbalancerMemberType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='LoadbalancerMemberType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='LoadbalancerMemberType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.admin_state is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sadmin-state>%s</%sadmin-state>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.admin_state)), input_name='admin-state'), namespace_, eol_))
- if self.status is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sstatus>%s</%sstatus>%s' % (namespace_, self.gds_format_string(quote_xml(self.status).encode(ExternalEncoding), input_name='status'), namespace_, eol_))
- if self.status_description is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sstatus-description>%s</%sstatus-description>%s' % (namespace_, self.gds_format_string(quote_xml(self.status_description).encode(ExternalEncoding), input_name='status-description'), namespace_, eol_))
- if self.protocol_port is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sprotocol-port>%s</%sprotocol-port>%s' % (namespace_, self.gds_format_integer(self.protocol_port, input_name='protocol-port'), namespace_, eol_))
- if self.weight is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sweight>%s</%sweight>%s' % (namespace_, self.gds_format_integer(self.weight, input_name='weight'), namespace_, eol_))
- if self.address is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%saddress>%s</%saddress>%s' % (namespace_, self.gds_format_string(quote_xml(self.address).encode(ExternalEncoding), input_name='address'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.admin_state is not None or
- self.status is not None or
- self.status_description is not None or
- self.protocol_port is not None or
- self.weight is not None or
- self.address is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='LoadbalancerMemberType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.admin_state is not None:
- showIndent(outfile, level)
- outfile.write('admin_state=%s,\n' % self.admin_state)
- if self.status is not None:
- showIndent(outfile, level)
- outfile.write('status=%s,\n' % quote_python(self.status).encode(ExternalEncoding))
- if self.status_description is not None:
- showIndent(outfile, level)
- outfile.write('status_description=%s,\n' % quote_python(self.status_description).encode(ExternalEncoding))
- if self.protocol_port is not None:
- showIndent(outfile, level)
- outfile.write('protocol_port=%d,\n' % self.protocol_port)
- if self.weight is not None:
- showIndent(outfile, level)
- outfile.write('weight=%d,\n' % self.weight)
- if self.address is not None:
- showIndent(outfile, level)
- outfile.write('address=%s,\n' % quote_python(self.address).encode(ExternalEncoding))
- def exportDict(self, name_='LoadbalancerMemberType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'admin-state':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'admin_state')
- self.admin_state = ival_
- elif nodeName_ == 'status':
- status_ = child_.text
- status_ = self.gds_validate_string(status_, node, 'status')
- self.status = status_
- elif nodeName_ == 'status-description':
- status_description_ = child_.text
- status_description_ = self.gds_validate_string(status_description_, node, 'status_description')
- self.status_description = status_description_
- elif nodeName_ == 'protocol-port':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'protocol_port')
- self.protocol_port = ival_
- elif nodeName_ == 'weight':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'weight')
- self.weight = ival_
- elif nodeName_ == 'address':
- address_ = child_.text
- address_ = self.gds_validate_string(address_, node, 'address')
- self.address = address_
- self.validate_IpAddressType(self.address) # validate type IpAddressType
-# end class LoadbalancerMemberType
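
Again purely illustrative: LoadbalancerMemberType is populated from XML through build()/buildChildren(), which coerce and validate each child element. A rough parse sketch, assuming the module-level Tag_pattern_ and the GeneratedsSuper helpers the code above calls (gds_validate_boolean, gds_validate_integer, raise_parse_error) are present, and using lxml only as an example ElementTree-compatible parser; the root element name is arbitrary since build() ignores it.

    # Hypothetical parse sketch; child element names mirror buildChildren() above.
    from lxml import etree   # any ElementTree-style node works here

    xml = ('<member>'
           '<admin-state>true</admin-state>'
           '<protocol-port>8080</protocol-port>'
           '<weight>1</weight>'
           '<address>10.0.0.5</address>'
           '</member>')
    member = LoadbalancerMemberType.factory()
    member.build(etree.fromstring(xml))
    assert member.get_protocol_port() == 8080
    assert member.get_admin_state() is True
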
-
-
-class project_loadbalancer_healthmonitor(GeneratedsSuper):
- """
- project_loadbalancer_healthmonitor class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if project_loadbalancer_healthmonitor.subclass:
- return project_loadbalancer_healthmonitor.subclass(*args_, **kwargs_)
- else:
- return project_loadbalancer_healthmonitor(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='project-loadbalancer-healthmonitor', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='project-loadbalancer-healthmonitor')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='project-loadbalancer-healthmonitor'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='project-loadbalancer-healthmonitor', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='project-loadbalancer-healthmonitor'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='project-loadbalancer-healthmonitor'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class project_loadbalancer_healthmonitor
-
-
-class loadbalancer_pool_loadbalancer_healthmonitor(GeneratedsSuper):
- """
- loadbalancer_pool_loadbalancer_healthmonitor class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if loadbalancer_pool_loadbalancer_healthmonitor.subclass:
- return loadbalancer_pool_loadbalancer_healthmonitor.subclass(*args_, **kwargs_)
- else:
- return loadbalancer_pool_loadbalancer_healthmonitor(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='loadbalancer-pool-loadbalancer-healthmonitor', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='loadbalancer-pool-loadbalancer-healthmonitor')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='loadbalancer-pool-loadbalancer-healthmonitor'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='loadbalancer-pool-loadbalancer-healthmonitor', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='loadbalancer-pool-loadbalancer-healthmonitor'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='loadbalancer-pool-loadbalancer-healthmonitor'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class loadbalancer_pool_loadbalancer_healthmonitor
-
-
-class LoadbalancerHealthmonitorType(GeneratedsSuper):
- """
- LoadbalancerHealthmonitorType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, admin_state=True, monitor_type=None, delay=None, timeout=None, max_retries=None, http_method=None, url_path=None, expected_codes=None, **kwargs):
- self.admin_state = admin_state
- self.monitor_type = monitor_type
- self.delay = delay
- self.timeout = timeout
- self.max_retries = max_retries
- self.http_method = http_method
- self.url_path = url_path
- self.expected_codes = expected_codes
- def factory(*args_, **kwargs_):
- if LoadbalancerHealthmonitorType.subclass:
- return LoadbalancerHealthmonitorType.subclass(*args_, **kwargs_)
- else:
- return LoadbalancerHealthmonitorType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_admin_state(self): return self.admin_state
- def set_admin_state(self, admin_state): self.admin_state = admin_state
- def get_monitor_type(self): return self.monitor_type
- def set_monitor_type(self, monitor_type): self.monitor_type = monitor_type
- def validate_HealthmonitorType(self, value):
- # Validate type HealthmonitorType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'PING', u'TCP', u'HTTP', u'HTTPS'])
- else:
- error = value not in [u'PING', u'TCP', u'HTTP', u'HTTPS']
- if error:
- raise ValueError("HealthmonitorType must be one of [u'PING', u'TCP', u'HTTP', u'HTTPS']")
- def get_delay(self): return self.delay
- def set_delay(self, delay): self.delay = delay
- def get_timeout(self): return self.timeout
- def set_timeout(self, timeout): self.timeout = timeout
- def get_max_retries(self): return self.max_retries
- def set_max_retries(self, max_retries): self.max_retries = max_retries
- def get_http_method(self): return self.http_method
- def set_http_method(self, http_method): self.http_method = http_method
- def get_url_path(self): return self.url_path
- def set_url_path(self, url_path): self.url_path = url_path
- def get_expected_codes(self): return self.expected_codes
- def set_expected_codes(self, expected_codes): self.expected_codes = expected_codes
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.admin_state == other.admin_state and
- self.monitor_type == other.monitor_type and
- self.delay == other.delay and
- self.timeout == other.timeout and
- self.max_retries == other.max_retries and
- self.http_method == other.http_method and
- self.url_path == other.url_path and
- self.expected_codes == other.expected_codes)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_admin_state (obj.populate_boolean ("admin_state"))
- obj.set_monitor_type (obj.populate_string ("monitor_type"))
- obj.set_delay (obj.populate_integer ("delay"))
- obj.set_timeout (obj.populate_integer ("timeout"))
- obj.set_max_retries (obj.populate_integer ("max_retries"))
- obj.set_http_method (obj.populate_string ("http_method"))
- obj.set_url_path (obj.populate_string ("url_path"))
- obj.set_expected_codes (obj.populate_string ("expected_codes"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='LoadbalancerHealthmonitorType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='LoadbalancerHealthmonitorType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='LoadbalancerHealthmonitorType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='LoadbalancerHealthmonitorType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.admin_state is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sadmin-state>%s</%sadmin-state>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.admin_state)), input_name='admin-state'), namespace_, eol_))
- if self.monitor_type is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%smonitor-type>%s</%smonitor-type>%s' % (namespace_, self.gds_format_string(quote_xml(self.monitor_type).encode(ExternalEncoding), input_name='monitor-type'), namespace_, eol_))
- if self.delay is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sdelay>%s</%sdelay>%s' % (namespace_, self.gds_format_integer(self.delay, input_name='delay'), namespace_, eol_))
- if self.timeout is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%stimeout>%s</%stimeout>%s' % (namespace_, self.gds_format_integer(self.timeout, input_name='timeout'), namespace_, eol_))
- if self.max_retries is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%smax-retries>%s</%smax-retries>%s' % (namespace_, self.gds_format_integer(self.max_retries, input_name='max-retries'), namespace_, eol_))
- if self.http_method is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%shttp-method>%s</%shttp-method>%s' % (namespace_, self.gds_format_string(quote_xml(self.http_method).encode(ExternalEncoding), input_name='http-method'), namespace_, eol_))
- if self.url_path is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%surl-path>%s</%surl-path>%s' % (namespace_, self.gds_format_string(quote_xml(self.url_path).encode(ExternalEncoding), input_name='url-path'), namespace_, eol_))
- if self.expected_codes is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sexpected-codes>%s</%sexpected-codes>%s' % (namespace_, self.gds_format_string(quote_xml(self.expected_codes).encode(ExternalEncoding), input_name='expected-codes'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.admin_state is not None or
- self.monitor_type is not None or
- self.delay is not None or
- self.timeout is not None or
- self.max_retries is not None or
- self.http_method is not None or
- self.url_path is not None or
- self.expected_codes is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='LoadbalancerHealthmonitorType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.admin_state is not None:
- showIndent(outfile, level)
- outfile.write('admin_state=%s,\n' % self.admin_state)
- if self.monitor_type is not None:
- showIndent(outfile, level)
- outfile.write('monitor_type=%s,\n' % quote_python(self.monitor_type).encode(ExternalEncoding))
- if self.delay is not None:
- showIndent(outfile, level)
- outfile.write('delay=%d,\n' % self.delay)
- if self.timeout is not None:
- showIndent(outfile, level)
- outfile.write('timeout=%d,\n' % self.timeout)
- if self.max_retries is not None:
- showIndent(outfile, level)
- outfile.write('max_retries=%d,\n' % self.max_retries)
- if self.http_method is not None:
- showIndent(outfile, level)
- outfile.write('http_method=%s,\n' % quote_python(self.http_method).encode(ExternalEncoding))
- if self.url_path is not None:
- showIndent(outfile, level)
- outfile.write('url_path=%s,\n' % quote_python(self.url_path).encode(ExternalEncoding))
- if self.expected_codes is not None:
- showIndent(outfile, level)
- outfile.write('expected_codes=%s,\n' % quote_python(self.expected_codes).encode(ExternalEncoding))
- def exportDict(self, name_='LoadbalancerHealthmonitorType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'admin-state':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'admin_state')
- self.admin_state = ival_
- elif nodeName_ == 'monitor-type':
- monitor_type_ = child_.text
- monitor_type_ = self.gds_validate_string(monitor_type_, node, 'monitor_type')
- self.monitor_type = monitor_type_
- self.validate_HealthmonitorType(self.monitor_type) # validate type HealthmonitorType
- elif nodeName_ == 'delay':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'delay')
- self.delay = ival_
- elif nodeName_ == 'timeout':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'timeout')
- self.timeout = ival_
- elif nodeName_ == 'max-retries':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'max_retries')
- self.max_retries = ival_
- elif nodeName_ == 'http-method':
- http_method_ = child_.text
- http_method_ = self.gds_validate_string(http_method_, node, 'http_method')
- self.http_method = http_method_
- elif nodeName_ == 'url-path':
- url_path_ = child_.text
- url_path_ = self.gds_validate_string(url_path_, node, 'url_path')
- self.url_path = url_path_
- elif nodeName_ == 'expected-codes':
- expected_codes_ = child_.text
- expected_codes_ = self.gds_validate_string(expected_codes_, node, 'expected_codes')
- self.expected_codes = expected_codes_
-# end class LoadbalancerHealthmonitorType
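
For illustration: LoadbalancerHealthmonitorType carries the probe settings (type, delay, timeout, retries, HTTP details), and validate_HealthmonitorType() enforces the xsd enumeration PING/TCP/HTTP/HTTPS. A small sketch of that check, under the same assumption that the generated module context is importable:

    # Hypothetical sketch of the enum check the class performs on parse.
    mon = LoadbalancerHealthmonitorType(monitor_type='HTTP', delay=5,
                                        timeout=5, max_retries=3,
                                        http_method='GET', url_path='/healthz',
                                        expected_codes='200')
    mon.validate_HealthmonitorType(mon.get_monitor_type())   # 'HTTP' is in the allowed set
    try:
        mon.validate_HealthmonitorType('ICMP')                # not PING/TCP/HTTP/HTTPS
    except ValueError:
        pass                                                  # rejected as expected
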
-
-
-class project_virtual_ip(GeneratedsSuper):
- """
- project_virtual_ip class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if project_virtual_ip.subclass:
- return project_virtual_ip.subclass(*args_, **kwargs_)
- else:
- return project_virtual_ip(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='project-virtual-ip', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='project-virtual-ip')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='project-virtual-ip'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='project-virtual-ip', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='project-virtual-ip'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='project-virtual-ip'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class project_virtual_ip
-
-
-class virtual_ip_loadbalancer_pool(GeneratedsSuper):
- """
- virtual_ip_loadbalancer_pool class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_ip_loadbalancer_pool.subclass:
- return virtual_ip_loadbalancer_pool.subclass(*args_, **kwargs_)
- else:
- return virtual_ip_loadbalancer_pool(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-ip-loadbalancer-pool', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-ip-loadbalancer-pool')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-ip-loadbalancer-pool'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-ip-loadbalancer-pool', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-ip-loadbalancer-pool'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-ip-loadbalancer-pool'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_ip_loadbalancer_pool
-
-
-class virtual_ip_virtual_machine_interface(GeneratedsSuper):
- """
- virtual_ip_virtual_machine_interface class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if virtual_ip_virtual_machine_interface.subclass:
- return virtual_ip_virtual_machine_interface.subclass(*args_, **kwargs_)
- else:
- return virtual_ip_virtual_machine_interface(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='virtual-ip-virtual-machine-interface', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='virtual-ip-virtual-machine-interface')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='virtual-ip-virtual-machine-interface'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='virtual-ip-virtual-machine-interface', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='virtual-ip-virtual-machine-interface'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='virtual-ip-virtual-machine-interface'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class virtual_ip_virtual_machine_interface
-
-
-class VirtualIpType(GeneratedsSuper):
- """
- VirtualIpType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, address=None, status=None, status_description=None, admin_state=True, protocol=None, protocol_port=None, connection_limit=None, subnet_id=None, persistence_cookie_name=None, persistence_type=None, **kwargs):
- self.address = address
- self.status = status
- self.status_description = status_description
- self.admin_state = admin_state
- self.protocol = protocol
- self.protocol_port = protocol_port
- self.connection_limit = connection_limit
- self.subnet_id = subnet_id
- self.persistence_cookie_name = persistence_cookie_name
- self.persistence_type = persistence_type
- def factory(*args_, **kwargs_):
- if VirtualIpType.subclass:
- return VirtualIpType.subclass(*args_, **kwargs_)
- else:
- return VirtualIpType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_address(self): return self.address
- def set_address(self, address): self.address = address
- def validate_IpAddressType(self, value):
- # Validate type IpAddressType, a restriction on xsd:string.
- pass
- def get_status(self): return self.status
- def set_status(self, status): self.status = status
- def get_status_description(self): return self.status_description
- def set_status_description(self, status_description): self.status_description = status_description
- def get_admin_state(self): return self.admin_state
- def set_admin_state(self, admin_state): self.admin_state = admin_state
- def get_protocol(self): return self.protocol
- def set_protocol(self, protocol): self.protocol = protocol
- def validate_LoadbalancerProtocolType(self, value):
- # Validate type LoadbalancerProtocolType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'HTTP', u'HTTPS', u'TCP'])
- else:
- error = value not in [u'HTTP', u'HTTPS', u'TCP']
- if error:
- raise ValueError("LoadbalancerProtocolType must be one of [u'HTTP', u'HTTPS', u'TCP']")
- def get_protocol_port(self): return self.protocol_port
- def set_protocol_port(self, protocol_port): self.protocol_port = protocol_port
- def get_connection_limit(self): return self.connection_limit
- def set_connection_limit(self, connection_limit): self.connection_limit = connection_limit
- def get_subnet_id(self): return self.subnet_id
- def set_subnet_id(self, subnet_id): self.subnet_id = subnet_id
- def validate_UuidStringType(self, value):
- # Validate type UuidStringType, a restriction on xsd:string.
- pass
- def get_persistence_cookie_name(self): return self.persistence_cookie_name
- def set_persistence_cookie_name(self, persistence_cookie_name): self.persistence_cookie_name = persistence_cookie_name
- def get_persistence_type(self): return self.persistence_type
- def set_persistence_type(self, persistence_type): self.persistence_type = persistence_type
- def validate_SessionPersistenceType(self, value):
- # Validate type SessionPersistenceType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'SOURCE_IP', u'HTTP_COOKIE', u'APP_COOKIE'])
- else:
- error = value not in [u'SOURCE_IP', u'HTTP_COOKIE', u'APP_COOKIE']
- if error:
- raise ValueError("SessionPersistenceType must be one of [u'SOURCE_IP', u'HTTP_COOKIE', u'APP_COOKIE']")
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.address == other.address and
- self.status == other.status and
- self.status_description == other.status_description and
- self.admin_state == other.admin_state and
- self.protocol == other.protocol and
- self.protocol_port == other.protocol_port and
- self.connection_limit == other.connection_limit and
- self.subnet_id == other.subnet_id and
- self.persistence_cookie_name == other.persistence_cookie_name and
- self.persistence_type == other.persistence_type)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_address (obj.populate_string ("address"))
- obj.set_status (obj.populate_string ("status"))
- obj.set_status_description (obj.populate_string ("status_description"))
- obj.set_admin_state (obj.populate_boolean ("admin_state"))
- obj.set_protocol (obj.populate_string ("protocol"))
- obj.set_protocol_port (obj.populate_integer ("protocol_port"))
- obj.set_connection_limit (obj.populate_integer ("connection_limit"))
- obj.set_subnet_id (obj.populate_string ("subnet_id"))
- obj.set_persistence_cookie_name (obj.populate_string ("persistence_cookie_name"))
- obj.set_persistence_type (obj.populate_string ("persistence_type"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='VirtualIpType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='VirtualIpType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VirtualIpType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='VirtualIpType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.address is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%saddress>%s</%saddress>%s' % (namespace_, self.gds_format_string(quote_xml(self.address).encode(ExternalEncoding), input_name='address'), namespace_, eol_))
- if self.status is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sstatus>%s</%sstatus>%s' % (namespace_, self.gds_format_string(quote_xml(self.status).encode(ExternalEncoding), input_name='status'), namespace_, eol_))
- if self.status_description is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sstatus-description>%s</%sstatus-description>%s' % (namespace_, self.gds_format_string(quote_xml(self.status_description).encode(ExternalEncoding), input_name='status-description'), namespace_, eol_))
- if self.admin_state is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sadmin-state>%s</%sadmin-state>%s' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.admin_state)), input_name='admin-state'), namespace_, eol_))
- if self.protocol is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sprotocol>%s</%sprotocol>%s' % (namespace_, self.gds_format_string(quote_xml(self.protocol).encode(ExternalEncoding), input_name='protocol'), namespace_, eol_))
- if self.protocol_port is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sprotocol-port>%s</%sprotocol-port>%s' % (namespace_, self.gds_format_integer(self.protocol_port, input_name='protocol-port'), namespace_, eol_))
- if self.connection_limit is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sconnection-limit>%s</%sconnection-limit>%s' % (namespace_, self.gds_format_integer(self.connection_limit, input_name='connection-limit'), namespace_, eol_))
- if self.subnet_id is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%ssubnet-id>%s</%ssubnet-id>%s' % (namespace_, self.gds_format_string(quote_xml(self.subnet_id).encode(ExternalEncoding), input_name='subnet-id'), namespace_, eol_))
- if self.persistence_cookie_name is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%spersistence-cookie-name>%s</%spersistence-cookie-name>%s' % (namespace_, self.gds_format_string(quote_xml(self.persistence_cookie_name).encode(ExternalEncoding), input_name='persistence-cookie-name'), namespace_, eol_))
- if self.persistence_type is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%spersistence-type>%s</%spersistence-type>%s' % (namespace_, self.gds_format_string(quote_xml(self.persistence_type).encode(ExternalEncoding), input_name='persistence-type'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.address is not None or
- self.status is not None or
- self.status_description is not None or
- self.admin_state is not None or
- self.protocol is not None or
- self.protocol_port is not None or
- self.connection_limit is not None or
- self.subnet_id is not None or
- self.persistence_cookie_name is not None or
- self.persistence_type is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='VirtualIpType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.address is not None:
- showIndent(outfile, level)
- outfile.write('address=%s,\n' % quote_python(self.address).encode(ExternalEncoding))
- if self.status is not None:
- showIndent(outfile, level)
- outfile.write('status=%s,\n' % quote_python(self.status).encode(ExternalEncoding))
- if self.status_description is not None:
- showIndent(outfile, level)
- outfile.write('status_description=%s,\n' % quote_python(self.status_description).encode(ExternalEncoding))
- if self.admin_state is not None:
- showIndent(outfile, level)
- outfile.write('admin_state=%s,\n' % self.admin_state)
- if self.protocol is not None:
- showIndent(outfile, level)
- outfile.write('protocol=%s,\n' % quote_python(self.protocol).encode(ExternalEncoding))
- if self.protocol_port is not None:
- showIndent(outfile, level)
- outfile.write('protocol_port=%d,\n' % self.protocol_port)
- if self.connection_limit is not None:
- showIndent(outfile, level)
- outfile.write('connection_limit=%d,\n' % self.connection_limit)
- if self.subnet_id is not None:
- showIndent(outfile, level)
- outfile.write('subnet_id=%s,\n' % quote_python(self.subnet_id).encode(ExternalEncoding))
- if self.persistence_cookie_name is not None:
- showIndent(outfile, level)
- outfile.write('persistence_cookie_name=%s,\n' % quote_python(self.persistence_cookie_name).encode(ExternalEncoding))
- if self.persistence_type is not None:
- showIndent(outfile, level)
- outfile.write('persistence_type=%s,\n' % quote_python(self.persistence_type).encode(ExternalEncoding))
- def exportDict(self, name_='VirtualIpType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'address':
- address_ = child_.text
- address_ = self.gds_validate_string(address_, node, 'address')
- self.address = address_
- self.validate_IpAddressType(self.address) # validate type IpAddressType
- elif nodeName_ == 'status':
- status_ = child_.text
- status_ = self.gds_validate_string(status_, node, 'status')
- self.status = status_
- elif nodeName_ == 'status-description':
- status_description_ = child_.text
- status_description_ = self.gds_validate_string(status_description_, node, 'status_description')
- self.status_description = status_description_
- elif nodeName_ == 'admin-state':
- sval_ = child_.text
- if sval_ in ('true', '1'):
- ival_ = True
- elif sval_ in ('false', '0'):
- ival_ = False
- else:
- raise_parse_error(child_, 'requires boolean')
- ival_ = self.gds_validate_boolean(ival_, node, 'admin_state')
- self.admin_state = ival_
- elif nodeName_ == 'protocol':
- protocol_ = child_.text
- protocol_ = self.gds_validate_string(protocol_, node, 'protocol')
- self.protocol = protocol_
- self.validate_LoadbalancerProtocolType(self.protocol) # validate type LoadbalancerProtocolType
- elif nodeName_ == 'protocol-port':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'protocol_port')
- self.protocol_port = ival_
- elif nodeName_ == 'connection-limit':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'connection_limit')
- self.connection_limit = ival_
- elif nodeName_ == 'subnet-id':
- subnet_id_ = child_.text
- subnet_id_ = self.gds_validate_string(subnet_id_, node, 'subnet_id')
- self.subnet_id = subnet_id_
- self.validate_UuidStringType(self.subnet_id) # validate type UuidStringType
- elif nodeName_ == 'persistence-cookie-name':
- persistence_cookie_name_ = child_.text
- persistence_cookie_name_ = self.gds_validate_string(persistence_cookie_name_, node, 'persistence_cookie_name')
- self.persistence_cookie_name = persistence_cookie_name_
- elif nodeName_ == 'persistence-type':
- persistence_type_ = child_.text
- persistence_type_ = self.gds_validate_string(persistence_type_, node, 'persistence_type')
- self.persistence_type = persistence_type_
- self.validate_SessionPersistenceType(self.persistence_type) # validate type SessionPersistenceType
-# end class VirtualIpType
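
A minimal usage sketch for the VirtualIpType bindings above (Python 2, to match the generated code). The class name, constructor keywords, and validators are taken from the code itself; the import path is only an assumed placeholder.

    from resource_xsd import VirtualIpType  # hypothetical import; use the actual generated-bindings module

    vip = VirtualIpType(address='10.1.1.5', protocol='HTTP', protocol_port=80,
                        admin_state=True, persistence_type='SOURCE_IP')
    vip.validate_LoadbalancerProtocolType(vip.get_protocol())        # passes: 'HTTP' is allowed
    vip.validate_SessionPersistenceType(vip.get_persistence_type())  # passes: 'SOURCE_IP' is allowed
    # vip.validate_LoadbalancerProtocolType('UDP')                   # would raise ValueError
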
-
-
-class BgpRouterParams(GeneratedsSuper):
- """
- BgpRouterParams class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, vendor=None, autonomous_system=None, identifier=None, address=None, port=None, hold_time=90, address_families=None, auth_data=None, local_autonomous_system=None, **kwargs):
- self.vendor = vendor
- self.autonomous_system = autonomous_system
- self.identifier = identifier
- self.address = address
- self.port = port
- self.hold_time = hold_time
- if isinstance(address_families, dict):
- obj = AddressFamilies(**address_families)
- self.address_families = obj
- else:
- self.address_families = address_families
- if isinstance(auth_data, dict):
- obj = AuthenticationData(**auth_data)
- self.auth_data = obj
- else:
- self.auth_data = auth_data
- self.local_autonomous_system = local_autonomous_system
- def factory(*args_, **kwargs_):
- if BgpRouterParams.subclass:
- return BgpRouterParams.subclass(*args_, **kwargs_)
- else:
- return BgpRouterParams(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_vendor(self): return self.vendor
- def set_vendor(self, vendor): self.vendor = vendor
- def get_autonomous_system(self): return self.autonomous_system
- def set_autonomous_system(self, autonomous_system): self.autonomous_system = autonomous_system
- def get_identifier(self): return self.identifier
- def set_identifier(self, identifier): self.identifier = identifier
- def validate_IpAddress(self, value):
- # Validate type IpAddress, a restriction on xsd:string.
- pass
- def get_address(self): return self.address
- def set_address(self, address): self.address = address
- def get_port(self): return self.port
- def set_port(self, port): self.port = port
- def get_hold_time(self): return self.hold_time
- def set_hold_time(self, hold_time): self.hold_time = hold_time
- def validate_BgpHoldTime(self, value):
- # Validate type BgpHoldTime, a restriction on xsd:integer.
- error = False
- if isinstance(value, list):
- v_int = map(int, value)
- v1, v2 = min(v_int), max(v_int)
- else:
- v1, v2 = int(value), int(value)
- error = (1 > v1)
- error |= (v2 > 65535)
- if error:
- raise ValueError("BgpHoldTime must be in the range 1-65535")
- def get_address_families(self): return self.address_families
- def set_address_families(self, address_families): self.address_families = address_families
- def get_auth_data(self): return self.auth_data
- def set_auth_data(self, auth_data): self.auth_data = auth_data
- def get_local_autonomous_system(self): return self.local_autonomous_system
- def set_local_autonomous_system(self, local_autonomous_system): self.local_autonomous_system = local_autonomous_system
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.vendor == other.vendor and
- self.autonomous_system == other.autonomous_system and
- self.identifier == other.identifier and
- self.address == other.address and
- self.port == other.port and
- self.hold_time == other.hold_time and
- self.address_families == other.address_families and
- self.auth_data == other.auth_data and
- self.local_autonomous_system == other.local_autonomous_system)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_vendor (obj.populate_string ("vendor"))
- obj.set_autonomous_system (obj.populate_integer ("autonomous_system"))
- obj.set_identifier (obj.populate_string ("identifier"))
- obj.set_address (obj.populate_string ("address"))
- obj.set_port (obj.populate_integer ("port"))
- obj.set_hold_time (obj.populate_integer ("hold_time"))
- obj.set_address_families (AddressFamilies.populate ())
- obj.set_auth_data (AuthenticationData.populate ())
- obj.set_local_autonomous_system (obj.populate_integer ("local_autonomous_system"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='BgpRouterParams', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='BgpRouterParams')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BgpRouterParams'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='BgpRouterParams', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.vendor is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%svendor>%s</%svendor>%s' % (namespace_, self.gds_format_string(quote_xml(self.vendor).encode(ExternalEncoding), input_name='vendor'), namespace_, eol_))
- if self.autonomous_system is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sautonomous-system>%s</%sautonomous-system>%s' % (namespace_, self.gds_format_integer(self.autonomous_system, input_name='autonomous-system'), namespace_, eol_))
- if self.identifier is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sidentifier>%s</%sidentifier>%s' % (namespace_, self.gds_format_string(quote_xml(self.identifier).encode(ExternalEncoding), input_name='identifier'), namespace_, eol_))
- if self.address is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%saddress>%s</%saddress>%s' % (namespace_, self.gds_format_string(quote_xml(self.address).encode(ExternalEncoding), input_name='address'), namespace_, eol_))
- if self.port is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sport>%s</%sport>%s' % (namespace_, self.gds_format_integer(self.port, input_name='port'), namespace_, eol_))
- if self.hold_time is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%shold-time>%s</%shold-time>%s' % (namespace_, self.gds_format_integer(self.hold_time, input_name='hold-time'), namespace_, eol_))
- if self.address_families is not None:
- self.address_families.export(outfile, level, namespace_, name_='address-families', pretty_print=pretty_print)
- if self.auth_data is not None:
- self.auth_data.export(outfile, level, namespace_, name_='auth-data', pretty_print=pretty_print)
- if self.local_autonomous_system is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%slocal-autonomous-system>%s</%slocal-autonomous-system>%s' % (namespace_, self.gds_format_integer(self.local_autonomous_system, input_name='local-autonomous-system'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.vendor is not None or
- self.autonomous_system is not None or
- self.identifier is not None or
- self.address is not None or
- self.port is not None or
- self.hold_time is not None or
- self.address_families is not None or
- self.auth_data is not None or
- self.local_autonomous_system is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='BgpRouterParams'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.vendor is not None:
- showIndent(outfile, level)
- outfile.write('vendor=%s,\n' % quote_python(self.vendor).encode(ExternalEncoding))
- if self.autonomous_system is not None:
- showIndent(outfile, level)
- outfile.write('autonomous_system=%d,\n' % self.autonomous_system)
- if self.identifier is not None:
- showIndent(outfile, level)
- outfile.write('identifier=%s,\n' % quote_python(self.identifier).encode(ExternalEncoding))
- if self.address is not None:
- showIndent(outfile, level)
- outfile.write('address=%s,\n' % quote_python(self.address).encode(ExternalEncoding))
- if self.port is not None:
- showIndent(outfile, level)
- outfile.write('port=%d,\n' % self.port)
- if self.hold_time is not None:
- showIndent(outfile, level)
- outfile.write('hold_time=%d,\n' % self.hold_time)
- if self.address_families is not None:
- showIndent(outfile, level)
- outfile.write('address_families=model_.AddressFamilies(\n')
- self.address_families.exportLiteral(outfile, level, name_='address_families')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.auth_data is not None:
- showIndent(outfile, level)
- outfile.write('auth_data=model_.AuthenticationData(\n')
- self.auth_data.exportLiteral(outfile, level, name_='auth_data')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.local_autonomous_system is not None:
- showIndent(outfile, level)
- outfile.write('local_autonomous_system=%d,\n' % self.local_autonomous_system)
- def exportDict(self, name_='BgpRouterParams'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'vendor':
- vendor_ = child_.text
- vendor_ = self.gds_validate_string(vendor_, node, 'vendor')
- self.vendor = vendor_
- elif nodeName_ == 'autonomous-system':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'autonomous_system')
- self.autonomous_system = ival_
- elif nodeName_ == 'identifier':
- identifier_ = child_.text
- identifier_ = self.gds_validate_string(identifier_, node, 'identifier')
- self.identifier = identifier_
- self.validate_IpAddress(self.identifier) # validate type IpAddress
- elif nodeName_ == 'address':
- address_ = child_.text
- address_ = self.gds_validate_string(address_, node, 'address')
- self.address = address_
- self.validate_IpAddress(self.address) # validate type IpAddress
- elif nodeName_ == 'port':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'port')
- self.port = ival_
- elif nodeName_ == 'hold-time':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'hold_time')
- self.hold_time = ival_
- self.validate_BgpHoldTime(self.hold_time) # validate type BgpHoldTime
- elif nodeName_ == 'address-families':
- obj_ = AddressFamilies.factory()
- obj_.build(child_)
- self.set_address_families(obj_)
- elif nodeName_ == 'auth-data':
- obj_ = AuthenticationData.factory()
- obj_.build(child_)
- self.set_auth_data(obj_)
- elif nodeName_ == 'local-autonomous-system':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'local_autonomous_system')
- self.local_autonomous_system = ival_
-# end class BgpRouterParams
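
A sketch of BgpRouterParams construction under the same assumptions (all values illustrative); the constructor coerces a plain dict passed as address_families into an AddressFamilies object, and validate_BgpHoldTime enforces the 1-65535 range.

    params = BgpRouterParams(vendor='contrail', autonomous_system=64512,
                             address='192.0.2.1', port=179, hold_time=90,
                             address_families={'family': ['inet', 'inet-vpn']})
    params.validate_BgpHoldTime(params.get_hold_time())   # passes: 90 is within 1-65535
    print(params.get_address_families().get_family())     # ['inet', 'inet-vpn']
    # params.validate_BgpHoldTime(0)                      # would raise ValueError
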
-
-
-class instance_bgp_router(GeneratedsSuper):
- """
- instance_bgp_router class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if instance_bgp_router.subclass:
- return instance_bgp_router.subclass(*args_, **kwargs_)
- else:
- return instance_bgp_router(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='instance-bgp-router', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='instance-bgp-router')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='instance-bgp-router'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='instance-bgp-router', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='instance-bgp-router'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='instance-bgp-router'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class instance_bgp_router
-
-
-class BgpPeeringAttributes(GeneratedsSuper):
- """
- BgpPeeringAttributes class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, session=None, **kwargs):
- if (session is None) or (session == []):
- self.session = []
- else:
- if isinstance(session[0], dict):
- objs = [BgpSession(**elem) for elem in session]
- self.session = objs
- else:
- self.session = session
- def factory(*args_, **kwargs_):
- if BgpPeeringAttributes.subclass:
- return BgpPeeringAttributes.subclass(*args_, **kwargs_)
- else:
- return BgpPeeringAttributes(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_session(self): return self.session
- def set_session(self, session): self.session = session
- def add_session(self, value): self.session.append(value)
- def insert_session(self, index, value): self.session[index] = value
- def delete_session(self, value): self.session.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.session == other.session)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_session ([BgpSession.populate ()])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='BgpPeeringAttributes', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='BgpPeeringAttributes')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BgpPeeringAttributes'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='BgpPeeringAttributes', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for session_ in self.session:
- if isinstance(session_, dict):
- session_ = BgpSession(**session_)
- session_.export(outfile, level, namespace_, name_='session', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.session
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='BgpPeeringAttributes'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('session=[\n')
- level += 1
- for session_ in self.session:
- showIndent(outfile, level)
- outfile.write('model_.BgpSession(\n')
- session_.exportLiteral(outfile, level, name_='BgpSession')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='BgpPeeringAttributes'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'session':
- obj_ = BgpSession.factory()
- obj_.build(child_)
- self.session.append(obj_)
-# end class BgpPeeringAttributes
-
-
-class BgpSession(GeneratedsSuper):
- """
- BgpSession class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, uuid=None, attributes=None, **kwargs):
- self.uuid = uuid
- if (attributes is None) or (attributes == []):
- self.attributes = []
- else:
- if isinstance(attributes[0], dict):
- objs = [BgpSessionAttributes(**elem) for elem in attributes]
- self.attributes = objs
- else:
- self.attributes = attributes
- def factory(*args_, **kwargs_):
- if BgpSession.subclass:
- return BgpSession.subclass(*args_, **kwargs_)
- else:
- return BgpSession(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_uuid(self): return self.uuid
- def set_uuid(self, uuid): self.uuid = uuid
- def get_attributes(self): return self.attributes
- def set_attributes(self, attributes): self.attributes = attributes
- def add_attributes(self, value): self.attributes.append(value)
- def insert_attributes(self, index, value): self.attributes[index] = value
- def delete_attributes(self, value): self.attributes.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.uuid == other.uuid and
- self.attributes == other.attributes)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_uuid (obj.populate_string ("uuid"))
- obj.set_attributes ([BgpSessionAttributes.populate ()])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='BgpSession', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='BgpSession')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BgpSession'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='BgpSession', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.uuid is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%suuid>%s</%suuid>%s' % (namespace_, self.gds_format_string(quote_xml(self.uuid).encode(ExternalEncoding), input_name='uuid'), namespace_, eol_))
- for attributes_ in self.attributes:
- if isinstance(attributes_, dict):
- attributes_ = BgpSessionAttributes(**attributes_)
- attributes_.export(outfile, level, namespace_, name_='attributes', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.uuid is not None or
- self.attributes
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='BgpSession'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.uuid is not None:
- showIndent(outfile, level)
- outfile.write('uuid=%s,\n' % quote_python(self.uuid).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('attributes=[\n')
- level += 1
- for attributes_ in self.attributes:
- showIndent(outfile, level)
- outfile.write('model_.BgpSessionAttributes(\n')
- attributes_.exportLiteral(outfile, level, name_='BgpSessionAttributes')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='BgpSession'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'uuid':
- uuid_ = child_.text
- uuid_ = self.gds_validate_string(uuid_, node, 'uuid')
- self.uuid = uuid_
- elif nodeName_ == 'attributes':
- obj_ = BgpSessionAttributes.factory()
- obj_.build(child_)
- self.attributes.append(obj_)
-# end class BgpSession
-
-
-class BgpSessionAttributes(GeneratedsSuper):
- """
- BgpSessionAttributes class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, bgp_router=None, address_families=None, auth_data=None, **kwargs):
- self.bgp_router = bgp_router
- if isinstance(address_families, dict):
- obj = AddressFamilies(**address_families)
- self.address_families = obj
- else:
- self.address_families = address_families
- if isinstance(auth_data, dict):
- obj = AuthenticationData(**auth_data)
- self.auth_data = obj
- else:
- self.auth_data = auth_data
- def factory(*args_, **kwargs_):
- if BgpSessionAttributes.subclass:
- return BgpSessionAttributes.subclass(*args_, **kwargs_)
- else:
- return BgpSessionAttributes(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_bgp_router(self): return self.bgp_router
- def set_bgp_router(self, bgp_router): self.bgp_router = bgp_router
- def get_address_families(self): return self.address_families
- def set_address_families(self, address_families): self.address_families = address_families
- def get_auth_data(self): return self.auth_data
- def set_auth_data(self, auth_data): self.auth_data = auth_data
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.bgp_router == other.bgp_router and
- self.address_families == other.address_families and
- self.auth_data == other.auth_data)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_bgp_router (obj.populate_string ("bgp_router"))
- obj.set_address_families (AddressFamilies.populate ())
- obj.set_auth_data (AuthenticationData.populate ())
- return obj
- def export(self, outfile, level=1, namespace_='', name_='BgpSessionAttributes', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='BgpSessionAttributes')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BgpSessionAttributes'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='BgpSessionAttributes', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.bgp_router is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sbgp-router>%s</%sbgp-router>%s' % (namespace_, self.gds_format_string(quote_xml(self.bgp_router).encode(ExternalEncoding), input_name='bgp-router'), namespace_, eol_))
- if self.address_families is not None:
- self.address_families.export(outfile, level, namespace_, name_='address-families', pretty_print=pretty_print)
- if self.auth_data is not None:
- self.auth_data.export(outfile, level, namespace_, name_='auth-data', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.bgp_router is not None or
- self.address_families is not None or
- self.auth_data is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='BgpSessionAttributes'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.bgp_router is not None:
- showIndent(outfile, level)
- outfile.write('bgp_router=%s,\n' % quote_python(self.bgp_router).encode(ExternalEncoding))
- if self.address_families is not None:
- showIndent(outfile, level)
- outfile.write('address_families=model_.AddressFamilies(\n')
- self.address_families.exportLiteral(outfile, level, name_='address_families')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.auth_data is not None:
- showIndent(outfile, level)
- outfile.write('auth_data=model_.AuthenticationData(\n')
- self.auth_data.exportLiteral(outfile, level, name_='auth_data')
- showIndent(outfile, level)
- outfile.write('),\n')
- def exportDict(self, name_='BgpSessionAttributes'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'bgp-router':
- bgp_router_ = child_.text
- bgp_router_ = self.gds_validate_string(bgp_router_, node, 'bgp_router')
- self.bgp_router = bgp_router_
- elif nodeName_ == 'address-families':
- obj_ = AddressFamilies.factory()
- obj_.build(child_)
- self.set_address_families(obj_)
- elif nodeName_ == 'auth-data':
- obj_ = AuthenticationData.factory()
- obj_.build(child_)
- self.set_auth_data(obj_)
-# end class BgpSessionAttributes
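
The peering classes nest: BgpPeeringAttributes holds a list of BgpSession objects, each of which holds BgpSessionAttributes. A sketch under the same assumptions, showing how dicts passed to the constructors are coerced into the generated types (all values are illustrative):

    peering = BgpPeeringAttributes(session=[{
        'uuid': 'de305d54-75b4-431b-adb2-eb6b9e546014',                  # example UUID
        'attributes': [{'address_families': {'family': ['inet-vpn']}}],
    }])
    session = peering.get_session()[0]                 # coerced to a BgpSession instance
    attrs = session.get_attributes()[0]                # coerced to a BgpSessionAttributes instance
    print(attrs.get_address_families().get_family())   # ['inet-vpn']
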
-
-
-class AddressFamilies(GeneratedsSuper):
- """
- AddressFamilies class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, family=None, **kwargs):
- if (family is None) or (family == []):
- self.family = []
- else:
- self.family = family
- def factory(*args_, **kwargs_):
- if AddressFamilies.subclass:
- return AddressFamilies.subclass(*args_, **kwargs_)
- else:
- return AddressFamilies(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_family(self): return self.family
- def set_family(self, family): self.family = family
- def add_family(self, value): self.family.append(value)
- def insert_family(self, index, value): self.family[index] = value
- def delete_family(self, value): self.family.remove(value)
- def validate_AddressFamily(self, value):
- # Validate type AddressFamily, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'inet', u'inet-vpn', u'e-vpn', u'erm-vpn', u'route-target', u'inet6-vpn'])
- else:
- error = value not in [u'inet', u'inet-vpn', u'e-vpn', u'erm-vpn', u'route-target', u'inet6-vpn']
- if error:
- raise ValueError("AddressFamily must be one of [u'inet', u'inet-vpn', u'e-vpn', u'erm-vpn', u'route-target', u'inet6-vpn']")
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.family == other.family)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_family ([obj.populate_string ("family")])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='AddressFamilies', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='AddressFamilies')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AddressFamilies'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='AddressFamilies', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for family_ in self.family:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sfamily>%s</%sfamily>%s' % (namespace_, self.gds_format_string(quote_xml(family_).encode(ExternalEncoding), input_name='family'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.family
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='AddressFamilies'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('family=[\n')
- level += 1
- for family_ in self.family:
- showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(family_).encode(ExternalEncoding))
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='AddressFamilies'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'family':
- family_ = child_.text
- family_ = self.gds_validate_string(family_, node, 'family')
- self.family.append(family_)
- self.validate_AddressFamily(self.family) # validate type AddressFamily
-# end class AddressFamilies
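
validate_AddressFamily accepts either a single family string or a list of them; a brief sketch under the same assumptions:

    af = AddressFamilies(family=['inet', 'route-target'])
    af.add_family('inet6-vpn')
    af.validate_AddressFamily(af.get_family())   # passes: every value is in the allowed set
    # af.validate_AddressFamily('ipv4')          # would raise ValueError
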
-
-
-class AuthenticationKeyItem(GeneratedsSuper):
- """
- AuthenticationKeyItem class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, key_id=None, key=None, **kwargs):
- self.key_id = key_id
- self.key = key
- def factory(*args_, **kwargs_):
- if AuthenticationKeyItem.subclass:
- return AuthenticationKeyItem.subclass(*args_, **kwargs_)
- else:
- return AuthenticationKeyItem(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_key_id(self): return self.key_id
- def set_key_id(self, key_id): self.key_id = key_id
- def validate_AuthenticationKeyId(self, value):
- # Validate type AuthenticationKeyId, a restriction on xsd:integer.
- error = False
- if isinstance(value, list):
- v_int = map(int, value)
- v1, v2 = min(v_int), max(v_int)
- else:
- v1, v2 = int(value), int(value)
- error = (0 > v1)
- error |= (v2 > 63)
- if error:
- raise ValueError("AuthenticationKeyId must be in the range 0-63")
- def get_key(self): return self.key
- def set_key(self, key): self.key = key
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.key_id == other.key_id and
- self.key == other.key)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_key_id (obj.populate_integer ("key_id"))
- obj.set_key (obj.populate_string ("key"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='AuthenticationKeyItem', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='AuthenticationKeyItem')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AuthenticationKeyItem'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='AuthenticationKeyItem', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.key_id is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%skey-id>%s</%skey-id>%s' % (namespace_, self.gds_format_integer(self.key_id, input_name='key-id'), namespace_, eol_))
- if self.key is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%skey>%s</%skey>%s' % (namespace_, self.gds_format_string(quote_xml(self.key).encode(ExternalEncoding), input_name='key'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.key_id is not None or
- self.key is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='AuthenticationKeyItem'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.key_id is not None:
- showIndent(outfile, level)
- outfile.write('key_id=%d,\n' % self.key_id)
- if self.key is not None:
- showIndent(outfile, level)
- outfile.write('key=%s,\n' % quote_python(self.key).encode(ExternalEncoding))
- def exportDict(self, name_='AuthenticationKeyItem'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'key-id':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'key_id')
- self.key_id = ival_
- self.validate_AuthenticationKeyId(self.key_id) # validate type AuthenticationKeyId
- elif nodeName_ == 'key':
- key_ = child_.text
- key_ = self.gds_validate_string(key_, node, 'key')
- self.key = key_
-# end class AuthenticationKeyItem
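
A brief sketch of the key-id range check (same assumptions; values illustrative):

    item = AuthenticationKeyItem(key_id=1, key='example-key')
    item.validate_AuthenticationKeyId(item.get_key_id())   # passes: 1 is within 0-63
    # item.validate_AuthenticationKeyId(64)                # would raise ValueError
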
-
-
-class AuthenticationData(GeneratedsSuper):
- """
- AuthenticationData class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, key_type=None, key_items=None, **kwargs):
- self.key_type = key_type
- if (key_items is None) or (key_items == []):
- self.key_items = []
- else:
- if isinstance(key_items[0], dict):
- objs = [AuthenticationKeyItem(**elem) for elem in key_items]
- self.key_items = objs
- else:
- self.key_items = key_items
- def factory(*args_, **kwargs_):
- if AuthenticationData.subclass:
- return AuthenticationData.subclass(*args_, **kwargs_)
- else:
- return AuthenticationData(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_key_type(self): return self.key_type
- def set_key_type(self, key_type): self.key_type = key_type
- def validate_AuthenticationKeyType(self, value):
- # Validate type AuthenticationKeyType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'md5'])
- else:
- error = value not in [u'md5']
- if error:
- raise ValueError("AuthenticationKeyType must be one of [u'md5']")
- def get_key_items(self): return self.key_items
- def set_key_items(self, key_items): self.key_items = key_items
- def add_key_items(self, value): self.key_items.append(value)
- def insert_key_items(self, index, value): self.key_items[index] = value
- def delete_key_items(self, value): self.key_items.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.key_type == other.key_type and
- self.key_items == other.key_items)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_key_type (obj.populate_string ("key_type"))
- obj.set_key_items ([AuthenticationKeyItem.populate ()])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='AuthenticationData', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='AuthenticationData')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AuthenticationData'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='AuthenticationData', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.key_type is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%skey-type>%s</%skey-type>%s' % (namespace_, self.gds_format_string(quote_xml(self.key_type).encode(ExternalEncoding), input_name='key-type'), namespace_, eol_))
- for key_items_ in self.key_items:
- if isinstance(key_items_, dict):
- key_items_ = AuthenticationKeyItem(**key_items_)
- key_items_.export(outfile, level, namespace_, name_='key-items', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.key_type is not None or
- self.key_items
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='AuthenticationData'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.key_type is not None:
- showIndent(outfile, level)
- outfile.write('key_type=%s,\n' % quote_python(self.key_type).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('key_items=[\n')
- level += 1
- for key_items_ in self.key_items:
- showIndent(outfile, level)
- outfile.write('model_.AuthenticationKeyItem(\n')
- key_items_.exportLiteral(outfile, level, name_='AuthenticationKeyItem')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='AuthenticationData'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'key-type':
- key_type_ = child_.text
- key_type_ = self.gds_validate_string(key_type_, node, 'key_type')
- self.key_type = key_type_
- self.validate_AuthenticationKeyType(self.key_type) # validate type AuthenticationKeyType
- elif nodeName_ == 'key-items':
- obj_ = AuthenticationKeyItem.factory()
- obj_.build(child_)
- self.key_items.append(obj_)
-# end class AuthenticationData
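As an illustrative sketch only (not part of the removed file), the two authentication classes above would typically be combined as follows. It assumes the generated vnc_api.gen.resource_xsd module is still importable and that AuthenticationKeyItem accepts key_id/key keyword arguments, as its buildChildren handler implies:

import sys
from vnc_api.gen.resource_xsd import AuthenticationData, AuthenticationKeyItem

# Build one MD5 key entry and wrap it in AuthenticationData (hypothetical values).
item = AuthenticationKeyItem(key_id=1, key='s3cret')
auth = AuthenticationData(key_type='md5', key_items=[item])
auth.validate_AuthenticationKeyType(auth.get_key_type())  # raises ValueError for anything but 'md5'
auth.export(sys.stdout, level=1, name_='AuthenticationData')  # pretty-printed XML to stdout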
-
-
-class ServiceChainInfo(GeneratedsSuper):
- """
- ServiceChainInfo class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, routing_instance=None, prefix=None, service_chain_address=None, service_instance=None, source_routing_instance=None, **kwargs):
- self.routing_instance = routing_instance
- if (prefix is None) or (prefix == []):
- self.prefix = []
- else:
- self.prefix = prefix
- self.service_chain_address = service_chain_address
- self.service_instance = service_instance
- self.source_routing_instance = source_routing_instance
- def factory(*args_, **kwargs_):
- if ServiceChainInfo.subclass:
- return ServiceChainInfo.subclass(*args_, **kwargs_)
- else:
- return ServiceChainInfo(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_routing_instance(self): return self.routing_instance
- def set_routing_instance(self, routing_instance): self.routing_instance = routing_instance
- def get_prefix(self): return self.prefix
- def set_prefix(self, prefix): self.prefix = prefix
- def add_prefix(self, value): self.prefix.append(value)
- def insert_prefix(self, index, value): self.prefix[index] = value
- def delete_prefix(self, value): self.prefix.remove(value)
- def get_service_chain_address(self): return self.service_chain_address
- def set_service_chain_address(self, service_chain_address): self.service_chain_address = service_chain_address
- def validate_IpAddress(self, value):
- # Validate type IpAddress, a restriction on xsd:string.
- pass
- def get_service_instance(self): return self.service_instance
- def set_service_instance(self, service_instance): self.service_instance = service_instance
- def get_source_routing_instance(self): return self.source_routing_instance
- def set_source_routing_instance(self, source_routing_instance): self.source_routing_instance = source_routing_instance
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.routing_instance == other.routing_instance and
- self.prefix == other.prefix and
- self.service_chain_address == other.service_chain_address and
- self.service_instance == other.service_instance and
- self.source_routing_instance == other.source_routing_instance)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_routing_instance (obj.populate_string ("routing_instance"))
- obj.set_prefix ([obj.populate_string ("prefix")])
- obj.set_service_chain_address (obj.populate_string ("service_chain_address"))
- obj.set_service_instance (obj.populate_string ("service_instance"))
- obj.set_source_routing_instance (obj.populate_string ("source_routing_instance"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='ServiceChainInfo', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='ServiceChainInfo')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ServiceChainInfo'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='ServiceChainInfo', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.routing_instance is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%srouting-instance>%s</%srouting-instance>%s' % (namespace_, self.gds_format_string(quote_xml(self.routing_instance).encode(ExternalEncoding), input_name='routing-instance'), namespace_, eol_))
- for prefix_ in self.prefix:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sprefix>%s</%sprefix>%s' % (namespace_, self.gds_format_string(quote_xml(prefix_).encode(ExternalEncoding), input_name='prefix'), namespace_, eol_))
- if self.service_chain_address is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sservice-chain-address>%s</%sservice-chain-address>%s' % (namespace_, self.gds_format_string(quote_xml(self.service_chain_address).encode(ExternalEncoding), input_name='service-chain-address'), namespace_, eol_))
- if self.service_instance is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sservice-instance>%s</%sservice-instance>%s' % (namespace_, self.gds_format_string(quote_xml(self.service_instance).encode(ExternalEncoding), input_name='service-instance'), namespace_, eol_))
- if self.source_routing_instance is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%ssource-routing-instance>%s</%ssource-routing-instance>%s' % (namespace_, self.gds_format_string(quote_xml(self.source_routing_instance).encode(ExternalEncoding), input_name='source-routing-instance'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.routing_instance is not None or
- self.prefix or
- self.service_chain_address is not None or
- self.service_instance is not None or
- self.source_routing_instance is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='ServiceChainInfo'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.routing_instance is not None:
- showIndent(outfile, level)
- outfile.write('routing_instance=%s,\n' % quote_python(self.routing_instance).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('prefix=[\n')
- level += 1
- for prefix_ in self.prefix:
- showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(prefix_).encode(ExternalEncoding))
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- if self.service_chain_address is not None:
- showIndent(outfile, level)
- outfile.write('service_chain_address=%s,\n' % quote_python(self.service_chain_address).encode(ExternalEncoding))
- if self.service_instance is not None:
- showIndent(outfile, level)
- outfile.write('service_instance=%s,\n' % quote_python(self.service_instance).encode(ExternalEncoding))
- if self.source_routing_instance is not None:
- showIndent(outfile, level)
- outfile.write('source_routing_instance=%s,\n' % quote_python(self.source_routing_instance).encode(ExternalEncoding))
- def exportDict(self, name_='ServiceChainInfo'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'routing-instance':
- routing_instance_ = child_.text
- routing_instance_ = self.gds_validate_string(routing_instance_, node, 'routing_instance')
- self.routing_instance = routing_instance_
- elif nodeName_ == 'prefix':
- prefix_ = child_.text
- prefix_ = self.gds_validate_string(prefix_, node, 'prefix')
- self.prefix.append(prefix_)
- elif nodeName_ == 'service-chain-address':
- service_chain_address_ = child_.text
- service_chain_address_ = self.gds_validate_string(service_chain_address_, node, 'service_chain_address')
- self.service_chain_address = service_chain_address_
- self.validate_IpAddress(self.service_chain_address) # validate type IpAddress
- elif nodeName_ == 'service-instance':
- service_instance_ = child_.text
- service_instance_ = self.gds_validate_string(service_instance_, node, 'service_instance')
- self.service_instance = service_instance_
- elif nodeName_ == 'source-routing-instance':
- source_routing_instance_ = child_.text
- source_routing_instance_ = self.gds_validate_string(source_routing_instance_, node, 'source_routing_instance')
- self.source_routing_instance = source_routing_instance_
-# end class ServiceChainInfo
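A hypothetical round-trip through exportDict(), shown only to illustrate how ServiceChainInfo serializes; the fully-qualified names below are invented, and the dict path relies on Python 2 (the generated code uses dict.iteritems):

from vnc_api.gen.resource_xsd import ServiceChainInfo

sc_info = ServiceChainInfo(
    routing_instance='default-domain:proj:net:ri',   # invented example values
    prefix=['10.1.1.0/24'],
    service_chain_address='10.1.1.253',
    service_instance='default-domain:proj:si')
# -> {'ServiceChainInfo': {'routing_instance': ..., 'prefix': [...], ...}}
print(sc_info.exportDict())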
-
-
-class StaticRouteType(GeneratedsSuper):
- """
- StaticRouteType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, prefix=None, next_hop=None, route_target=None, **kwargs):
- self.prefix = prefix
- self.next_hop = next_hop
- if (route_target is None) or (route_target == []):
- self.route_target = []
- else:
- self.route_target = route_target
- def factory(*args_, **kwargs_):
- if StaticRouteType.subclass:
- return StaticRouteType.subclass(*args_, **kwargs_)
- else:
- return StaticRouteType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_prefix(self): return self.prefix
- def set_prefix(self, prefix): self.prefix = prefix
- def get_next_hop(self): return self.next_hop
- def set_next_hop(self, next_hop): self.next_hop = next_hop
- def get_route_target(self): return self.route_target
- def set_route_target(self, route_target): self.route_target = route_target
- def add_route_target(self, value): self.route_target.append(value)
- def insert_route_target(self, index, value): self.route_target[index] = value
- def delete_route_target(self, value): self.route_target.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.prefix == other.prefix and
- self.next_hop == other.next_hop and
- self.route_target == other.route_target)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_prefix (obj.populate_string ("prefix"))
- obj.set_next_hop (obj.populate_string ("next_hop"))
- obj.set_route_target ([obj.populate_string ("route_target")])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='StaticRouteType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='StaticRouteType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StaticRouteType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='StaticRouteType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.prefix is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sprefix>%s</%sprefix>%s' % (namespace_, self.gds_format_string(quote_xml(self.prefix).encode(ExternalEncoding), input_name='prefix'), namespace_, eol_))
- if self.next_hop is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%snext-hop>%s</%snext-hop>%s' % (namespace_, self.gds_format_string(quote_xml(self.next_hop).encode(ExternalEncoding), input_name='next-hop'), namespace_, eol_))
- for route_target_ in self.route_target:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sroute-target>%s</%sroute-target>%s' % (namespace_, self.gds_format_string(quote_xml(route_target_).encode(ExternalEncoding), input_name='route-target'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.prefix is not None or
- self.next_hop is not None or
- self.route_target
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='StaticRouteType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.prefix is not None:
- showIndent(outfile, level)
- outfile.write('prefix=%s,\n' % quote_python(self.prefix).encode(ExternalEncoding))
- if self.next_hop is not None:
- showIndent(outfile, level)
- outfile.write('next_hop=%s,\n' % quote_python(self.next_hop).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('route_target=[\n')
- level += 1
- for route_target_ in self.route_target:
- showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(route_target_).encode(ExternalEncoding))
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='StaticRouteType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'prefix':
- prefix_ = child_.text
- prefix_ = self.gds_validate_string(prefix_, node, 'prefix')
- self.prefix = prefix_
- elif nodeName_ == 'next-hop':
- next_hop_ = child_.text
- next_hop_ = self.gds_validate_string(next_hop_, node, 'next_hop')
- self.next_hop = next_hop_
- elif nodeName_ == 'route-target':
- route_target_ = child_.text
- route_target_ = self.gds_validate_string(route_target_, node, 'route_target')
- self.route_target.append(route_target_)
-# end class StaticRouteType
-
-
-class StaticRouteEntriesType(GeneratedsSuper):
- """
- StaticRouteEntriesType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, route=None, **kwargs):
- if (route is None) or (route == []):
- self.route = []
- else:
- if isinstance(route[0], dict):
- objs = [StaticRouteType(**elem) for elem in route]
- self.route = objs
- else:
- self.route = route
- def factory(*args_, **kwargs_):
- if StaticRouteEntriesType.subclass:
- return StaticRouteEntriesType.subclass(*args_, **kwargs_)
- else:
- return StaticRouteEntriesType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_route(self): return self.route
- def set_route(self, route): self.route = route
- def add_route(self, value): self.route.append(value)
- def insert_route(self, index, value): self.route[index] = value
- def delete_route(self, value): self.route.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.route == other.route)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_route ([StaticRouteType.populate ()])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='StaticRouteEntriesType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='StaticRouteEntriesType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StaticRouteEntriesType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='StaticRouteEntriesType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for route_ in self.route:
- if isinstance(route_, dict):
- route_ = StaticRouteType(**route_)
- route_.export(outfile, level, namespace_, name_='route', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.route
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='StaticRouteEntriesType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('route=[\n')
- level += 1
- for route_ in self.route:
- showIndent(outfile, level)
- outfile.write('model_.StaticRouteType(\n')
- route_.exportLiteral(outfile, level, name_='StaticRouteType')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='StaticRouteEntriesType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'route':
- obj_ = StaticRouteType.factory()
- obj_.build(child_)
- self.route.append(obj_)
-# end class StaticRouteEntriesType
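The constructor above promotes plain dicts to StaticRouteType objects, so callers can pass either form; a minimal sketch of that behaviour (invented addresses, assuming the removed module is importable):

from vnc_api.gen.resource_xsd import StaticRouteEntriesType, StaticRouteType

routes = StaticRouteEntriesType(route=[
    {'prefix': '0.0.0.0/0', 'next_hop': '10.0.0.1', 'route_target': ['target:64512:1']},
])
# The dict element was converted to a StaticRouteType instance by __init__.
assert isinstance(routes.get_route()[0], StaticRouteType)
routes.add_route(StaticRouteType(prefix='192.0.2.0/24', next_hop='10.0.0.2'))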
-
-
-class ProtocolBgpType(GeneratedsSuper):
- """
- ProtocolBgpType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if ProtocolBgpType.subclass:
- return ProtocolBgpType.subclass(*args_, **kwargs_)
- else:
- return ProtocolBgpType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='ProtocolBgpType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='ProtocolBgpType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ProtocolBgpType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='ProtocolBgpType', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='ProtocolBgpType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='ProtocolBgpType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class ProtocolBgpType
-
-
-class ProtocolOspfType(GeneratedsSuper):
- """
- ProtocolOspfType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, area=None, **kwargs):
- self.area = area
- def factory(*args_, **kwargs_):
- if ProtocolOspfType.subclass:
- return ProtocolOspfType.subclass(*args_, **kwargs_)
- else:
- return ProtocolOspfType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_area(self): return self.area
- def set_area(self, area): self.area = area
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.area == other.area)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_area (obj.populate_integer ("area"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='ProtocolOspfType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='ProtocolOspfType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ProtocolOspfType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='ProtocolOspfType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.area is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sarea>%s</%sarea>%s' % (namespace_, self.gds_format_integer(self.area, input_name='area'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.area is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='ProtocolOspfType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.area is not None:
- showIndent(outfile, level)
- outfile.write('area=%d,\n' % self.area)
- def exportDict(self, name_='ProtocolOspfType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'area':
- sval_ = child_.text
- try:
- ival_ = int(sval_)
- except (TypeError, ValueError), exp:
- raise_parse_error(child_, 'requires integer: %s' % exp)
- ival_ = self.gds_validate_integer(ival_, node, 'area')
- self.area = ival_
-# end class ProtocolOspfType
-
-
-class ProtocolStaticType(GeneratedsSuper):
- """
- ProtocolStaticType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, route=None, **kwargs):
- if (route is None) or (route == []):
- self.route = []
- else:
- self.route = route
- def factory(*args_, **kwargs_):
- if ProtocolStaticType.subclass:
- return ProtocolStaticType.subclass(*args_, **kwargs_)
- else:
- return ProtocolStaticType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_route(self): return self.route
- def set_route(self, route): self.route = route
- def add_route(self, value): self.route.append(value)
- def insert_route(self, index, value): self.route[index] = value
- def delete_route(self, value): self.route.remove(value)
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.route == other.route)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_route ([obj.populate_string ("route")])
- return obj
- def export(self, outfile, level=1, namespace_='', name_='ProtocolStaticType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='ProtocolStaticType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ProtocolStaticType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='ProtocolStaticType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- for route_ in self.route:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sroute>%s</%sroute>%s' % (namespace_, self.gds_format_string(quote_xml(route_).encode(ExternalEncoding), input_name='route'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.route
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='ProtocolStaticType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('route=[\n')
- level += 1
- for route_ in self.route:
- showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(route_).encode(ExternalEncoding))
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def exportDict(self, name_='ProtocolStaticType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'route':
- route_ = child_.text
- route_ = self.gds_validate_string(route_, node, 'route')
- self.route.append(route_)
-# end class ProtocolStaticType
-
-
-class ConnectionType(GeneratedsSuper):
- """
- ConnectionType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if ConnectionType.subclass:
- return ConnectionType.subclass(*args_, **kwargs_)
- else:
- return ConnectionType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='ConnectionType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='ConnectionType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ConnectionType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='ConnectionType', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='ConnectionType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='ConnectionType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class ConnectionType
-
-
-class InstanceTargetType(GeneratedsSuper):
- """
- InstanceTargetType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, import_export=None, **kwargs):
- self.import_export = import_export
- def factory(*args_, **kwargs_):
- if InstanceTargetType.subclass:
- return InstanceTargetType.subclass(*args_, **kwargs_)
- else:
- return InstanceTargetType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_import_export(self): return self.import_export
- def set_import_export(self, import_export): self.import_export = import_export
- def validate_ImportExportType(self, value):
- # Validate type ImportExportType, a restriction on xsd:string.
- error = False
- if isinstance(value, list):
- error = set(value) - set([u'import', u'export'])
- else:
- error = value not in [u'import', u'export']
- if error:
- raise ValueError("ImportExportType must be one of [u'import', u'export']")
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.import_export == other.import_export)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_import_export (obj.populate_string ("import_export"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='InstanceTargetType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='InstanceTargetType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='InstanceTargetType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='InstanceTargetType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.import_export is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%simport-export>%s</%simport-export>%s' % (namespace_, self.gds_format_string(quote_xml(self.import_export).encode(ExternalEncoding), input_name='import-export'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.import_export is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='InstanceTargetType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.import_export is not None:
- showIndent(outfile, level)
- outfile.write('import_export=%s,\n' % quote_python(self.import_export).encode(ExternalEncoding))
- def exportDict(self, name_='InstanceTargetType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'import-export':
- import_export_ = child_.text
- import_export_ = self.gds_validate_string(import_export_, node, 'import_export')
- self.import_export = import_export_
- self.validate_ImportExportType(self.import_export) # validate type ImportExportType
-# end class InstanceTargetType
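A short sketch of the ImportExportType restriction enforced above; any value other than 'import' or 'export' raises ValueError (illustrative only, assuming the module is importable):

from vnc_api.gen.resource_xsd import InstanceTargetType

target = InstanceTargetType(import_export='import')
target.validate_ImportExportType(target.get_import_export())  # passes silently
try:
    target.validate_ImportExportType('both')  # not one of [u'import', u'export']
except ValueError as err:
    print(err)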
-
-
-class DefaultProtocolType(GeneratedsSuper):
- """
- DefaultProtocolType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, bgp=None, ospf=None, **kwargs):
- if isinstance(bgp, dict):
- obj = ProtocolBgpType(**bgp)
- self.bgp = obj
- else:
- self.bgp = bgp
- if isinstance(ospf, dict):
- obj = ProtocolOspfType(**ospf)
- self.ospf = obj
- else:
- self.ospf = ospf
- def factory(*args_, **kwargs_):
- if DefaultProtocolType.subclass:
- return DefaultProtocolType.subclass(*args_, **kwargs_)
- else:
- return DefaultProtocolType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_bgp(self): return self.bgp
- def set_bgp(self, bgp): self.bgp = bgp
- def get_ospf(self): return self.ospf
- def set_ospf(self, ospf): self.ospf = ospf
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.bgp == other.bgp and
- self.ospf == other.ospf)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_bgp (ProtocolBgpType.populate ())
- obj.set_ospf (ProtocolOspfType.populate ())
- return obj
- def export(self, outfile, level=1, namespace_='', name_='DefaultProtocolType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='DefaultProtocolType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DefaultProtocolType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='DefaultProtocolType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.bgp is not None:
- self.bgp.export(outfile, level, namespace_, name_='bgp', pretty_print=pretty_print)
- if self.ospf is not None:
- self.ospf.export(outfile, level, namespace_, name_='ospf', pretty_print=pretty_print)
- def hasContent_(self):
- if (
- self.bgp is not None or
- self.ospf is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='DefaultProtocolType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.bgp is not None:
- showIndent(outfile, level)
- outfile.write('bgp=model_.ProtocolBgpType(\n')
- self.bgp.exportLiteral(outfile, level, name_='bgp')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.ospf is not None:
- showIndent(outfile, level)
- outfile.write('ospf=model_.ProtocolOspfType(\n')
- self.ospf.exportLiteral(outfile, level, name_='ospf')
- showIndent(outfile, level)
- outfile.write('),\n')
- def exportDict(self, name_='DefaultProtocolType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'bgp':
- obj_ = ProtocolBgpType.factory()
- obj_.build(child_)
- self.set_bgp(obj_)
- elif nodeName_ == 'ospf':
- obj_ = ProtocolOspfType.factory()
- obj_.build(child_)
- self.set_ospf(obj_)
-# end class DefaultProtocolType
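Since build() dispatches on the child element names 'bgp' and 'ospf', parsing a small XML fragment would look roughly like this; a sketch that assumes the module-level helpers (Tag_pattern_, gds_validate_integer, and so on) behave as in other generateDS output:

from xml.etree import ElementTree as etree
from vnc_api.gen.resource_xsd import DefaultProtocolType

node = etree.fromstring('<default-protocol><bgp/><ospf><area>1</area></ospf></default-protocol>')
proto = DefaultProtocolType.factory()
proto.build(node)
print(proto.get_ospf().get_area())  # 1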
-
-
-class BindingType(GeneratedsSuper):
- """
- BindingType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if BindingType.subclass:
- return BindingType.subclass(*args_, **kwargs_)
- else:
- return BindingType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='BindingType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='BindingType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BindingType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='BindingType', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='BindingType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='BindingType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class BindingType
-
-
-class AttachmentAddressType(GeneratedsSuper):
- """
- AttachmentAddressType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, **kwargs):
- pass
- def factory(*args_, **kwargs_):
- if AttachmentAddressType.subclass:
- return AttachmentAddressType.subclass(*args_, **kwargs_)
- else:
- return AttachmentAddressType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def __eq__(self, other): return True
- def __ne__(self, other): return False
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- return obj
- def export(self, outfile, level=1, namespace_='', name_='AttachmentAddressType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='AttachmentAddressType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AttachmentAddressType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='AttachmentAddressType', fromsubclass_=False, pretty_print=True):
- pass
- def hasContent_(self):
- if (
-
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='AttachmentAddressType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- pass
- def exportDict(self, name_='AttachmentAddressType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- pass
-# end class AttachmentAddressType
-
-
-class AttachmentInfoType(GeneratedsSuper):
- """
- AttachmentInfoType class definition from :doc:`vnc_cfg.xsd`
- """
- subclass = None
- superclass = None
- def __init__(self, static=None, bgp=None, ospf=None, state=None, **kwargs):
- if isinstance(static, dict):
- obj = ProtocolStaticType(**static)
- self.static = obj
- else:
- self.static = static
- if isinstance(bgp, dict):
- obj = ProtocolBgpType(**bgp)
- self.bgp = obj
- else:
- self.bgp = bgp
- if isinstance(ospf, dict):
- obj = ProtocolOspfType(**ospf)
- self.ospf = obj
- else:
- self.ospf = ospf
- self.state = state
- def factory(*args_, **kwargs_):
- if AttachmentInfoType.subclass:
- return AttachmentInfoType.subclass(*args_, **kwargs_)
- else:
- return AttachmentInfoType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_static(self): return self.static
- def set_static(self, static): self.static = static
- def get_bgp(self): return self.bgp
- def set_bgp(self, bgp): self.bgp = bgp
- def get_ospf(self): return self.ospf
- def set_ospf(self, ospf): self.ospf = ospf
- def get_state(self): return self.state
- def set_state(self, state): self.state = state
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return (self.static == other.static and
- self.bgp == other.bgp and
- self.ospf == other.ospf and
- self.state == other.state)
- return NotImplemented
- def __ne__(self, other):
- if isinstance(other, self.__class__):
- return not self.__eq__(other)
- return NotImplemented
-
- @classmethod
- def populate (cls, *a, **kwa):
- obj = cls (*a, **kwa)
- obj.set_static (ProtocolStaticType.populate ())
- obj.set_bgp (ProtocolBgpType.populate ())
- obj.set_ospf (ProtocolOspfType.populate ())
- obj.set_state (obj.populate_string ("state"))
- return obj
- def export(self, outfile, level=1, namespace_='', name_='AttachmentInfoType', namespacedef_='', pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- showIndent(outfile, level, pretty_print)
- outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
- already_processed = []
- self.exportAttributes(outfile, level, already_processed, namespace_, name_='AttachmentInfoType')
- if self.hasContent_():
- outfile.write('>%s' % (eol_, ))
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
- showIndent(outfile, level, pretty_print)
- outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
- else:
- outfile.write('/>%s' % (eol_, ))
- def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AttachmentInfoType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='AttachmentInfoType', fromsubclass_=False, pretty_print=True):
- if pretty_print:
- eol_ = '\n'
- else:
- eol_ = ''
- if self.static is not None:
- self.static.export(outfile, level, namespace_, name_='static', pretty_print=pretty_print)
- if self.bgp is not None:
- self.bgp.export(outfile, level, namespace_, name_='bgp', pretty_print=pretty_print)
- if self.ospf is not None:
- self.ospf.export(outfile, level, namespace_, name_='ospf', pretty_print=pretty_print)
- if self.state is not None:
- showIndent(outfile, level, pretty_print)
- outfile.write('<%sstate>%s</%sstate>%s' % (namespace_, self.gds_format_string(quote_xml(self.state).encode(ExternalEncoding), input_name='state'), namespace_, eol_))
- def hasContent_(self):
- if (
- self.static is not None or
- self.bgp is not None or
- self.ospf is not None or
- self.state is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='AttachmentInfoType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, [], name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, already_processed, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.static is not None:
- showIndent(outfile, level)
- outfile.write('static=model_.ProtocolStaticType(\n')
- self.static.exportLiteral(outfile, level, name_='static')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.bgp is not None:
- showIndent(outfile, level)
- outfile.write('bgp=model_.ProtocolBgpType(\n')
- self.bgp.exportLiteral(outfile, level, name_='bgp')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.ospf is not None:
- showIndent(outfile, level)
- outfile.write('ospf=model_.ProtocolOspfType(\n')
- self.ospf.exportLiteral(outfile, level, name_='ospf')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.state is not None:
- showIndent(outfile, level)
- outfile.write('state=%s,\n' % quote_python(self.state).encode(ExternalEncoding))
- def exportDict(self, name_='AttachmentInfoType'):
- obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
- obj_dict = json.loads(obj_json)
- return {name_: obj_dict}
- def build(self, node):
- self.buildAttributes(node, node.attrib, [])
- for child in node:
- nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
- self.buildChildren(child, node, nodeName_)
- def buildAttributes(self, node, attrs, already_processed):
- pass
- def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
- if nodeName_ == 'static':
- obj_ = ProtocolStaticType.factory()
- obj_.build(child_)
- self.set_static(obj_)
- elif nodeName_ == 'bgp':
- obj_ = ProtocolBgpType.factory()
- obj_.build(child_)
- self.set_bgp(obj_)
- elif nodeName_ == 'ospf':
- obj_ = ProtocolOspfType.factory()
- obj_.build(child_)
- self.set_ospf(obj_)
- elif nodeName_ == 'state':
- state_ = child_.text
- state_ = self.gds_validate_string(state_, node, 'state')
- self.state = state_
-# end class AttachmentInfoType
-
-
-
-__all__ = [
- "AclEntriesType",
- "AclRuleType",
- "ActionListType",
- "AddressFamilies",
- "AddressType",
- "AllocationPoolType",
- "AllowedAddressPair",
- "AllowedAddressPairs",
- "ApiAccessListType",
- "ApiAccessType",
- "AttachmentAddressType",
- "AttachmentInfoType",
- "AuthenticationData",
- "AuthenticationKeyItem",
- "BgpPeeringAttributes",
- "BgpRouterParams",
- "BgpSession",
- "BgpSessionAttributes",
- "BindingType",
- "ConnectionType",
- "DefaultProtocolType",
- "DhcpOptionType",
- "DhcpOptionsListType",
- "DomainLimitsType",
- "EncapsulationPrioritiesType",
- "FloatingIpPoolType",
- "IdPermsType",
- "InstanceTargetType",
- "InterfaceMirrorType",
- "IpAddressesType",
- "IpamDnsAddressType",
- "IpamSubnetType",
- "IpamType",
- "JunosServicePorts",
- "KeyValuePair",
- "KeyValuePairs",
- "LinklocalServiceEntryType",
- "LinklocalServicesTypes",
- "LoadbalancerHealthmonitorType",
- "LoadbalancerMemberType",
- "LoadbalancerPoolType",
- "MacAddressesType",
- "MatchConditionType",
- "MirrorActionType",
- "PermType",
- "PluginProperties",
- "PluginProperty",
- "PolicyBasedForwardingRuleType",
- "PolicyEntriesType",
- "PolicyRuleType",
- "PortType",
- "ProtocolBgpType",
- "ProtocolOspfType",
- "ProtocolStaticType",
- "QuotaType",
- "RouteTableType",
- "RouteTargetList",
- "RouteType",
- "SNMPCredentials",
- "SequenceType",
- "ServiceChainInfo",
- "ServiceInstanceInterfaceType",
- "ServiceInstanceType",
- "ServiceScaleOutType",
- "ServiceTemplateInterfaceType",
- "ServiceTemplateType",
- "StaticRouteEntriesType",
- "StaticRouteType",
- "SubnetListType",
- "SubnetType",
- "TimerType",
- "UserCredentials",
- "UuidType",
- "VirtualDnsRecordType",
- "VirtualDnsType",
- "VirtualIpType",
- "VirtualMachineInterfacePropertiesType",
- "VirtualNetworkPolicyType",
- "VirtualNetworkType",
- "VnSubnetsType",
- "VrfAssignRuleType",
- "VrfAssignTableType",
- "config_root_domain",
- "config_root_global_system_config",
- "customer_attachment_floating_ip",
- "customer_attachment_virtual_machine_interface",
- "domain_namespace",
- "domain_project",
- "domain_service_template",
- "domain_virtual_DNS",
- "floating_ip_pool_floating_ip",
- "floating_ip_project",
- "floating_ip_virtual_machine_interface",
- "global_system_config_analytics_node",
- "global_system_config_bgp_router",
- "global_system_config_config_node",
- "global_system_config_database_node",
- "global_system_config_global_vrouter_config",
- "global_system_config_physical_router",
- "global_system_config_service_appliance_set",
- "global_system_config_virtual_router",
- "instance_bgp_router",
- "instance_ip_virtual_machine_interface",
- "instance_ip_virtual_network",
- "loadbalancer_pool_loadbalancer_healthmonitor",
- "loadbalancer_pool_loadbalancer_member",
- "loadbalancer_pool_service_appliance_set",
- "loadbalancer_pool_service_instance",
- "loadbalancer_pool_virtual_machine_interface",
- "logical_interface_virtual_machine_interface",
- "logical_router_gateway",
- "logical_router_interface",
- "logical_router_service_instance",
- "logical_router_target",
- "network_ipam_virtual_DNS",
- "physical_interface_logical_interface",
- "physical_router_bgp_router",
- "physical_router_logical_interface",
- "physical_router_physical_interface",
- "physical_router_virtual_network",
- "physical_router_virtual_router",
- "project_floating_ip_pool",
- "project_interface_route_table",
- "project_loadbalancer_healthmonitor",
- "project_loadbalancer_pool",
- "project_logical_router",
- "project_network_ipam",
- "project_network_policy",
- "project_qos_forwarding_class",
- "project_qos_queue",
- "project_route_table",
- "project_security_group",
- "project_service_instance",
- "project_virtual_ip",
- "project_virtual_machine_interface",
- "project_virtual_network",
- "provider_attachment_virtual_router",
- "qos_forwarding_class_qos_queue",
- "security_group_access_control_list",
- "service_appliance_set_service_appliance",
- "service_instance_service_template",
- "subnet_virtual_machine_interface",
- "virtual_DNS_virtual_DNS_record",
- "virtual_ip_loadbalancer_pool",
- "virtual_ip_virtual_machine_interface",
- "virtual_machine_interface_qos_forwarding_class",
- "virtual_machine_interface_route_table",
- "virtual_machine_interface_security_group",
- "virtual_machine_interface_sub_interface",
- "virtual_machine_interface_virtual_machine",
- "virtual_machine_interface_virtual_network",
- "virtual_machine_service_instance",
- "virtual_machine_virtual_machine_interface",
- "virtual_network_access_control_list",
- "virtual_network_floating_ip_pool",
- "virtual_network_qos_forwarding_class",
- "virtual_network_route_table",
- "virtual_network_routing_instance",
- "virtual_router_bgp_router",
- "virtual_router_virtual_machine"
- ]
diff --git a/Testcases/vnc_api/gen/resource_xsd.pyc b/Testcases/vnc_api/gen/resource_xsd.pyc
deleted file mode 100644
index 8272f58..0000000
--- a/Testcases/vnc_api/gen/resource_xsd.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/gen/vnc_api_client_gen.py b/Testcases/vnc_api/gen/vnc_api_client_gen.py
deleted file mode 100644
index ce969e9..0000000
--- a/Testcases/vnc_api/gen/vnc_api_client_gen.py
+++ /dev/null
@@ -1,5354 +0,0 @@
-
-# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit!
-
-import json
-import vnc_api.gen.resource_xsd
-import vnc_api.gen.resource_client
-from vnc_api.gen.connection_drv_gen import ConnectionDriverBase
-from cfgm_common import rest
-from cfgm_common.exceptions import *
-
-class VncApiClientGen(ConnectionDriverBase):
- """
- This class provides type specific methods to create,
- read, update, delete and list objects from the server
- """
-
- _tenant_name = 'default-tenant'
- def __init__(self, obj_serializer):
- self._obj_serializer = obj_serializer
- self._type_to_class = {}
- self._type_to_class['domain'] = vnc_api.gen.resource_client.Domain
- self._type_to_class['global_vrouter_config'] = vnc_api.gen.resource_client.GlobalVrouterConfig
- self._type_to_class['instance_ip'] = vnc_api.gen.resource_client.InstanceIp
- self._type_to_class['network_policy'] = vnc_api.gen.resource_client.NetworkPolicy
- self._type_to_class['loadbalancer_pool'] = vnc_api.gen.resource_client.LoadbalancerPool
- self._type_to_class['virtual_DNS_record'] = vnc_api.gen.resource_client.VirtualDnsRecord
- self._type_to_class['route_target'] = vnc_api.gen.resource_client.RouteTarget
- self._type_to_class['floating_ip'] = vnc_api.gen.resource_client.FloatingIp
- self._type_to_class['floating_ip_pool'] = vnc_api.gen.resource_client.FloatingIpPool
- self._type_to_class['physical_router'] = vnc_api.gen.resource_client.PhysicalRouter
- self._type_to_class['bgp_router'] = vnc_api.gen.resource_client.BgpRouter
- self._type_to_class['virtual_router'] = vnc_api.gen.resource_client.VirtualRouter
- self._type_to_class['config_root'] = vnc_api.gen.resource_client.ConfigRoot
- self._type_to_class['subnet'] = vnc_api.gen.resource_client.Subnet
- self._type_to_class['global_system_config'] = vnc_api.gen.resource_client.GlobalSystemConfig
- self._type_to_class['service_appliance'] = vnc_api.gen.resource_client.ServiceAppliance
- self._type_to_class['service_instance'] = vnc_api.gen.resource_client.ServiceInstance
- self._type_to_class['namespace'] = vnc_api.gen.resource_client.Namespace
- self._type_to_class['logical_interface'] = vnc_api.gen.resource_client.LogicalInterface
- self._type_to_class['route_table'] = vnc_api.gen.resource_client.RouteTable
- self._type_to_class['physical_interface'] = vnc_api.gen.resource_client.PhysicalInterface
- self._type_to_class['access_control_list'] = vnc_api.gen.resource_client.AccessControlList
- self._type_to_class['analytics_node'] = vnc_api.gen.resource_client.AnalyticsNode
- self._type_to_class['virtual_DNS'] = vnc_api.gen.resource_client.VirtualDns
- self._type_to_class['customer_attachment'] = vnc_api.gen.resource_client.CustomerAttachment
- self._type_to_class['service_appliance_set'] = vnc_api.gen.resource_client.ServiceApplianceSet
- self._type_to_class['config_node'] = vnc_api.gen.resource_client.ConfigNode
- self._type_to_class['qos_queue'] = vnc_api.gen.resource_client.QosQueue
- self._type_to_class['virtual_machine'] = vnc_api.gen.resource_client.VirtualMachine
- self._type_to_class['interface_route_table'] = vnc_api.gen.resource_client.InterfaceRouteTable
- self._type_to_class['service_template'] = vnc_api.gen.resource_client.ServiceTemplate
- self._type_to_class['virtual_ip'] = vnc_api.gen.resource_client.VirtualIp
- self._type_to_class['loadbalancer_member'] = vnc_api.gen.resource_client.LoadbalancerMember
- self._type_to_class['security_group'] = vnc_api.gen.resource_client.SecurityGroup
- self._type_to_class['provider_attachment'] = vnc_api.gen.resource_client.ProviderAttachment
- self._type_to_class['virtual_machine_interface'] = vnc_api.gen.resource_client.VirtualMachineInterface
- self._type_to_class['loadbalancer_healthmonitor'] = vnc_api.gen.resource_client.LoadbalancerHealthmonitor
- self._type_to_class['virtual_network'] = vnc_api.gen.resource_client.VirtualNetwork
- self._type_to_class['project'] = vnc_api.gen.resource_client.Project
- self._type_to_class['qos_forwarding_class'] = vnc_api.gen.resource_client.QosForwardingClass
- self._type_to_class['database_node'] = vnc_api.gen.resource_client.DatabaseNode
- self._type_to_class['routing_instance'] = vnc_api.gen.resource_client.RoutingInstance
- self._type_to_class['network_ipam'] = vnc_api.gen.resource_client.NetworkIpam
- self._type_to_class['logical_router'] = vnc_api.gen.resource_client.LogicalRouter
- #end __init__
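The constructor above registers every resource type name against its generated client class in _type_to_class, so generic code can resolve a class from a type string. A minimal lookup sketch, assuming a ready client instance (the helper name below is illustrative, not part of the deleted file):

    def class_for_type(client, obj_type):
        # e.g. class_for_type(client, 'virtual_network')
        #   -> vnc_api.gen.resource_client.VirtualNetwork
        return client._type_to_class[obj_type]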
- def domain_create(self, obj):
- """Create new domain.
-
- :param obj: :class:`.Domain` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"domain":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.Domain.create_uri,
- data = json_body)
-
- domain_dict = json.loads(content)['domain']
- obj.uuid = domain_dict['uuid']
- if 'parent_uuid' in domain_dict:
- obj.parent_uuid = domain_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end domain_create
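domain_create (like every *_create and *_update method in this file) serializes the object through the default= hook so None-valued fields are dropped, then nests the JSON under the resource's REST type key by string concatenation. A standalone restatement of that wrapping, with illustrative names only:

    import json

    def wrap_body(type_name, obj, obj_serializer):
        # Produces e.g. '{"domain": {...}}', matching the json_body built above.
        return '{"%s":%s}' % (type_name, json.dumps(obj, default=obj_serializer))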
-
- def domain_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return domain information.
-
- :param fq_name: Fully qualified name of domain
- :param fq_name_str: Fully qualified name string of domain
- :param id: UUID of domain
- :param ifmap_id: IFMAP id of domain
- :returns: :class:`.Domain` object
-
- """
- (args_ok, result) = self._read_args_to_id('domain', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.Domain.resource_uri_base['domain'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['domain']
- domain_obj = vnc_api.gen.resource_client.Domain.from_dict(**obj_dict)
- domain_obj.clear_pending_updates()
- domain_obj.set_server_conn(self)
-
- return domain_obj
- #end domain_read
-
- def domain_update(self, obj):
- """Update domain.
-
- :param obj: :class:`.Domain` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('domain', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"domain":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.Domain.resource_uri_base['domain'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('domain', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('domain', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end domain_update
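The reference-reconciliation loop above (repeated in every *_update method) diffs the original and current reference lists as sets of (uuid, to, attr) tuples, then issues a DELETE ref_update for entries that disappeared and an ADD for new ones. A small worked example of that set arithmetic, with made-up values:

    orig_refs = {('uuid-1', ('default-domain', 'vn1'), None)}
    new_refs  = {('uuid-1', ('default-domain', 'vn1'), None),
                 ('uuid-2', ('default-domain', 'vn2'), None)}
    # Removed refs -> 'DELETE'; added refs -> 'ADD'
    assert (orig_refs - new_refs) == set()
    assert (new_refs - orig_refs) == {('uuid-2', ('default-domain', 'vn2'), None)}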
-
- def domains_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all domains.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.Domain` objects
-
- """
- return self.resource_list('domain', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end domains_list
-
- def domain_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete domain from the system.
-
- :param fq_name: Fully qualified name of domain
- :param id: UUID of domain
- :param ifmap_id: IFMAP id of domain
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'domain', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.Domain.resource_uri_base['domain'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end domain_delete
-
- def get_default_domain_id(self):
- """Return UUID of default domain."""
- return self.fq_name_to_id('domain', vnc_api.gen.resource_client.Domain().get_fq_name())
- #end get_default_domain_id
-
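Taken together, the domain_* methods above form the CRUD surface that this generated class repeats for every resource type below. An end-to-end usage sketch, assuming the concrete vnc_api.vnc_api.VncApi client (which derives from this generated class) and a hypothetical domain name:

    from vnc_api.vnc_api import VncApi
    from vnc_api.gen.resource_client import Domain

    client = VncApi()                      # assumes a reachable API server
    dom = Domain('example-domain')         # hypothetical name
    uuid = client.domain_create(dom)       # POST to Domain.create_uri
    dom = client.domain_read(id=uuid)      # GET  .../domain/<uuid>
    client.domain_update(dom)              # PUT  .../domain/<uuid>
    client.domain_delete(id=uuid)          # DELETE .../domain/<uuid>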
- def global_vrouter_config_create(self, obj):
- """Create new global-vrouter-config.
-
- :param obj: :class:`.GlobalVrouterConfig` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"global-vrouter-config":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.GlobalVrouterConfig.create_uri,
- data = json_body)
-
- global_vrouter_config_dict = json.loads(content)['global-vrouter-config']
- obj.uuid = global_vrouter_config_dict['uuid']
- if 'parent_uuid' in global_vrouter_config_dict:
- obj.parent_uuid = global_vrouter_config_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end global_vrouter_config_create
-
- def global_vrouter_config_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return global-vrouter-config information.
-
- :param fq_name: Fully qualified name of global-vrouter-config
- :param fq_name_str: Fully qualified name string of global-vrouter-config
- :param id: UUID of global-vrouter-config
- :param ifmap_id: IFMAP id of global-vrouter-config
- :returns: :class:`.GlobalVrouterConfig` object
-
- """
- (args_ok, result) = self._read_args_to_id('global-vrouter-config', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.GlobalVrouterConfig.resource_uri_base['global-vrouter-config'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['global-vrouter-config']
- global_vrouter_config_obj = vnc_api.gen.resource_client.GlobalVrouterConfig.from_dict(**obj_dict)
- global_vrouter_config_obj.clear_pending_updates()
- global_vrouter_config_obj.set_server_conn(self)
-
- return global_vrouter_config_obj
- #end global_vrouter_config_read
-
- def global_vrouter_config_update(self, obj):
- """Update global-vrouter-config.
-
- :param obj: :class:`.GlobalVrouterConfig` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('global-vrouter-config', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"global-vrouter-config":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.GlobalVrouterConfig.resource_uri_base['global-vrouter-config'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('global-vrouter-config', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('global-vrouter-config', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end global_vrouter_config_update
-
- def global_vrouter_configs_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all global-vrouter-configs.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.GlobalVrouterConfig` objects
-
- """
- return self.resource_list('global-vrouter-config', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end global_vrouter_configs_list
-
- def global_vrouter_config_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete global-vrouter-config from the system.
-
- :param fq_name: Fully qualified name of global-vrouter-config
- :param id: UUID of global-vrouter-config
- :param ifmap_id: IFMAP id of global-vrouter-config
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'global-vrouter-config', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.GlobalVrouterConfig.resource_uri_base['global-vrouter-config'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end global_vrouter_config_delete
-
- def get_default_global_vrouter_config_id(self):
- """Return UUID of default global-vrouter-config."""
- return self.fq_name_to_id('global-vrouter-config', vnc_api.gen.resource_client.GlobalVrouterConfig().get_fq_name())
- #end get_default_global_vrouter_config_id
-
- def instance_ip_create(self, obj):
- """Create new instance-ip.
-
- :param obj: :class:`.InstanceIp` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"instance-ip":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.InstanceIp.create_uri,
- data = json_body)
-
- instance_ip_dict = json.loads(content)['instance-ip']
- obj.uuid = instance_ip_dict['uuid']
- if 'parent_uuid' in instance_ip_dict:
- obj.parent_uuid = instance_ip_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end instance_ip_create
-
- def instance_ip_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return instance-ip information.
-
- :param fq_name: Fully qualified name of instance-ip
- :param fq_name_str: Fully qualified name string of instance-ip
- :param id: UUID of instance-ip
- :param ifmap_id: IFMAP id of instance-ip
- :returns: :class:`.InstanceIp` object
-
- """
- (args_ok, result) = self._read_args_to_id('instance-ip', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.InstanceIp.resource_uri_base['instance-ip'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['instance-ip']
- instance_ip_obj = vnc_api.gen.resource_client.InstanceIp.from_dict(**obj_dict)
- instance_ip_obj.clear_pending_updates()
- instance_ip_obj.set_server_conn(self)
-
- return instance_ip_obj
- #end instance_ip_read
-
- def instance_ip_update(self, obj):
- """Update instance-ip.
-
- :param obj: :class:`.InstanceIp` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('instance-ip', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"instance-ip":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.InstanceIp.resource_uri_base['instance-ip'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('instance-ip', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('instance-ip', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end instance_ip_update
-
- def instance_ips_list(self, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all instance-ips."""
- return self.resource_list('instance-ip', back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end instance_ips_list
-
- def instance_ip_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete instance-ip from the system.
-
- :param fq_name: Fully qualified name of instance-ip
- :param id: UUID of instance-ip
- :param ifmap_id: IFMAP id of instance-ip
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'instance-ip', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.InstanceIp.resource_uri_base['instance-ip'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end instance_ip_delete
-
- def get_default_instance_ip_id(self):
- """Return UUID of default instance-ip."""
- return self.fq_name_to_id('instance-ip', vnc_api.gen.resource_client.InstanceIp().get_fq_name())
- #end get_default_instance_ip_id
-
- def network_policy_create(self, obj):
- """Create new network-policy.
-
- :param obj: :class:`.NetworkPolicy` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"network-policy":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.NetworkPolicy.create_uri,
- data = json_body)
-
- network_policy_dict = json.loads(content)['network-policy']
- obj.uuid = network_policy_dict['uuid']
- if 'parent_uuid' in network_policy_dict:
- obj.parent_uuid = network_policy_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end network_policy_create
-
- def network_policy_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return network-policy information.
-
- :param fq_name: Fully qualified name of network-policy
- :param fq_name_str: Fully qualified name string of network-policy
- :param id: UUID of network-policy
- :param ifmap_id: IFMAP id of network-policy
- :returns: :class:`.NetworkPolicy` object
-
- """
- (args_ok, result) = self._read_args_to_id('network-policy', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.NetworkPolicy.resource_uri_base['network-policy'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['network-policy']
- network_policy_obj = vnc_api.gen.resource_client.NetworkPolicy.from_dict(**obj_dict)
- network_policy_obj.clear_pending_updates()
- network_policy_obj.set_server_conn(self)
-
- return network_policy_obj
- #end network_policy_read
-
- def network_policy_update(self, obj):
- """Update network-policy.
-
- :param obj: :class:`.NetworkPolicy` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('network-policy', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"network-policy":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.NetworkPolicy.resource_uri_base['network-policy'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('network-policy', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('network-policy', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end network_policy_update
-
- def network_policys_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all network-policys.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.NetworkPolicy` objects
-
- """
- return self.resource_list('network-policy', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end network_policys_list
-
- def network_policy_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete network-policy from the system.
-
- :param fq_name: Fully qualified name of network-policy
- :param id: UUID of network-policy
- :param ifmap_id: IFMAP id of network-policy
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'network-policy', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.NetworkPolicy.resource_uri_base['network-policy'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end network_policy_delete
-
- def get_default_network_policy_id(self):
- """Return UUID of default network-policy."""
- return self.fq_name_to_id('network-policy', vnc_api.gen.resource_client.NetworkPolicy().get_fq_name())
- #end get_default_network_policy_id
-
- def loadbalancer_pool_create(self, obj):
- """Create new loadbalancer-pool.
-
- :param obj: :class:`.LoadbalancerPool` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"loadbalancer-pool":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.LoadbalancerPool.create_uri,
- data = json_body)
-
- loadbalancer_pool_dict = json.loads(content)['loadbalancer-pool']
- obj.uuid = loadbalancer_pool_dict['uuid']
- if 'parent_uuid' in loadbalancer_pool_dict:
- obj.parent_uuid = loadbalancer_pool_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end loadbalancer_pool_create
-
- def loadbalancer_pool_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return loadbalancer-pool information.
-
- :param fq_name: Fully qualified name of loadbalancer-pool
- :param fq_name_str: Fully qualified name string of loadbalancer-pool
- :param id: UUID of loadbalancer-pool
- :param ifmap_id: IFMAP id of loadbalancer-pool
- :returns: :class:`.LoadbalancerPool` object
-
- """
- (args_ok, result) = self._read_args_to_id('loadbalancer-pool', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.LoadbalancerPool.resource_uri_base['loadbalancer-pool'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['loadbalancer-pool']
- loadbalancer_pool_obj = vnc_api.gen.resource_client.LoadbalancerPool.from_dict(**obj_dict)
- loadbalancer_pool_obj.clear_pending_updates()
- loadbalancer_pool_obj.set_server_conn(self)
-
- return loadbalancer_pool_obj
- #end loadbalancer_pool_read
-
- def loadbalancer_pool_update(self, obj):
- """Update loadbalancer-pool.
-
- :param obj: :class:`.LoadbalancerPool` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('loadbalancer-pool', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"loadbalancer-pool":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.LoadbalancerPool.resource_uri_base['loadbalancer-pool'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('loadbalancer-pool', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('loadbalancer-pool', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end loadbalancer_pool_update
-
- def loadbalancer_pools_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all loadbalancer-pools.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.LoadbalancerPool` objects
-
- """
- return self.resource_list('loadbalancer-pool', parent_id = parent_id, parent_fq_name = parent_fq_name, back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end loadbalancer_pools_list
-
- def loadbalancer_pool_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete loadbalancer-pool from the system.
-
- :param fq_name: Fully qualified name of loadbalancer-pool
- :param id: UUID of loadbalancer-pool
- :param ifmap_id: IFMAP id of loadbalancer-pool
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'loadbalancer-pool', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.LoadbalancerPool.resource_uri_base['loadbalancer-pool'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end loadbalancer_pool_delete
-
- def get_default_loadbalancer_pool_id(self):
- """Return UUID of default loadbalancer-pool."""
- return self.fq_name_to_id('loadbalancer-pool', vnc_api.gen.resource_client.LoadbalancerPool().get_fq_name())
- #end get_default_loadbalancer_pool_id
-
- def virtual_DNS_record_create(self, obj):
- """Create new virtual-DNS-record.
-
- :param obj: :class:`.VirtualDnsRecord` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"virtual-DNS-record":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.VirtualDnsRecord.create_uri,
- data = json_body)
-
- virtual_DNS_record_dict = json.loads(content)['virtual-DNS-record']
- obj.uuid = virtual_DNS_record_dict['uuid']
- if 'parent_uuid' in virtual_DNS_record_dict:
- obj.parent_uuid = virtual_DNS_record_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end virtual_DNS_record_create
-
- def virtual_DNS_record_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return virtual-DNS-record information.
-
- :param fq_name: Fully qualified name of virtual-DNS-record
- :param fq_name_str: Fully qualified name string of virtual-DNS-record
- :param id: UUID of virtual-DNS-record
- :param ifmap_id: IFMAP id of virtual-DNS-record
- :returns: :class:`.VirtualDnsRecord` object
-
- """
- (args_ok, result) = self._read_args_to_id('virtual-DNS-record', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.VirtualDnsRecord.resource_uri_base['virtual-DNS-record'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['virtual-DNS-record']
- virtual_DNS_record_obj = vnc_api.gen.resource_client.VirtualDnsRecord.from_dict(**obj_dict)
- virtual_DNS_record_obj.clear_pending_updates()
- virtual_DNS_record_obj.set_server_conn(self)
-
- return virtual_DNS_record_obj
- #end virtual_DNS_record_read
-
- def virtual_DNS_record_update(self, obj):
- """Update virtual-DNS-record.
-
- :param obj: :class:`.VirtualDnsRecord` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('virtual-DNS-record', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"virtual-DNS-record":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.VirtualDnsRecord.resource_uri_base['virtual-DNS-record'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('virtual-DNS-record', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('virtual-DNS-record', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end virtual_DNS_record_update
-
- def virtual_DNS_records_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all virtual-DNS-records.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: full qualified name of parent as optional search filter
- :returns: list of :class:`.VirtualDnsRecord` objects
-
- """
- return self.resource_list('virtual-DNS-record', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end virtual_DNS_records_list
-
- def virtual_DNS_record_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete virtual-DNS-record from the system.
-
- :param fq_name: Fully qualified name of virtual-DNS-record
- :param id: UUID of virtual-DNS-record
- :param ifmap_id: IFMAP id of virtual-DNS-record
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'virtual-DNS-record', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.VirtualDnsRecord.resource_uri_base['virtual-DNS-record'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end virtual_DNS_record_delete
-
- def get_default_virtual_DNS_record_id(self):
- """Return UUID of default virtual-DNS-record."""
- return self.fq_name_to_id('virtual-DNS-record', vnc_api.gen.resource_client.VirtualDnsRecord().get_fq_name())
- #end get_default_virtual_DNS_record_id
-
- def route_target_create(self, obj):
- """Create new route-target.
-
- :param obj: :class:`.RouteTarget` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"route-target":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.RouteTarget.create_uri,
- data = json_body)
-
- route_target_dict = json.loads(content)['route-target']
- obj.uuid = route_target_dict['uuid']
- if 'parent_uuid' in route_target_dict:
- obj.parent_uuid = route_target_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end route_target_create
-
- def route_target_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return route-target information.
-
- :param fq_name: Fully qualified name of route-target
- :param fq_name_str: Fully qualified name string of route-target
- :param id: UUID of route-target
- :param ifmap_id: IFMAP id of route-target
- :returns: :class:`.RouteTarget` object
-
- """
- (args_ok, result) = self._read_args_to_id('route-target', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.RouteTarget.resource_uri_base['route-target'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['route-target']
- route_target_obj = vnc_api.gen.resource_client.RouteTarget.from_dict(**obj_dict)
- route_target_obj.clear_pending_updates()
- route_target_obj.set_server_conn(self)
-
- return route_target_obj
- #end route_target_read
-
- def route_target_update(self, obj):
- """Update route-target.
-
- :param obj: :class:`.RouteTarget` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('route-target', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"route-target":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.RouteTarget.resource_uri_base['route-target'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('route-target', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('route-target', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end route_target_update
-
- def route_targets_list(self, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all route-targets."""
- return self.resource_list('route-target', obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end route_targets_list
-
- def route_target_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete route-target from the system.
-
- :param fq_name: Fully qualified name of route-target
- :param id: UUID of route-target
- :param ifmap_id: IFMAP id of route-target
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'route-target', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.RouteTarget.resource_uri_base['route-target'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end route_target_delete
-
- def get_default_route_target_id(self):
- """Return UUID of default route-target."""
- return self.fq_name_to_id('route-target', vnc_api.gen.resource_client.RouteTarget().get_fq_name())
- #end get_default_route_target_id
-
- def floating_ip_create(self, obj):
- """Create new floating-ip.
-
- :param obj: :class:`.FloatingIp` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"floating-ip":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.FloatingIp.create_uri,
- data = json_body)
-
- floating_ip_dict = json.loads(content)['floating-ip']
- obj.uuid = floating_ip_dict['uuid']
- if 'parent_uuid' in floating_ip_dict:
- obj.parent_uuid = floating_ip_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end floating_ip_create
-
- def floating_ip_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return floating-ip information.
-
- :param fq_name: Fully qualified name of floating-ip
- :param fq_name_str: Fully qualified name string of floating-ip
- :param id: UUID of floating-ip
- :param ifmap_id: IFMAP id of floating-ip
- :returns: :class:`.FloatingIp` object
-
- """
- (args_ok, result) = self._read_args_to_id('floating-ip', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.FloatingIp.resource_uri_base['floating-ip'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['floating-ip']
- floating_ip_obj = vnc_api.gen.resource_client.FloatingIp.from_dict(**obj_dict)
- floating_ip_obj.clear_pending_updates()
- floating_ip_obj.set_server_conn(self)
-
- return floating_ip_obj
- #end floating_ip_read
-
- def floating_ip_update(self, obj):
- """Update floating-ip.
-
- :param obj: :class:`.FloatingIp` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('floating-ip', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"floating-ip":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.FloatingIp.resource_uri_base['floating-ip'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('floating-ip', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('floating-ip', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end floating_ip_update
-
- def floating_ips_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all floating-ips.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.FloatingIp` objects
-
- """
- return self.resource_list('floating-ip', parent_id = parent_id, parent_fq_name = parent_fq_name, back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end floating_ips_list
-
- def floating_ip_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete floating-ip from the system.
-
- :param fq_name: Fully qualified name of floating-ip
- :param id: UUID of floating-ip
- :param ifmap_id: IFMAP id of floating-ip
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'floating-ip', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.FloatingIp.resource_uri_base['floating-ip'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end floating_ip_delete
-
- def get_default_floating_ip_id(self):
- """Return UUID of default floating-ip."""
- return self.fq_name_to_id('floating-ip', vnc_api.gen.resource_client.FloatingIp().get_fq_name())
- #end get_default_floating_ip_id
-
- def floating_ip_pool_create(self, obj):
- """Create new floating-ip-pool.
-
- :param obj: :class:`.FloatingIpPool` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"floating-ip-pool":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.FloatingIpPool.create_uri,
- data = json_body)
-
- floating_ip_pool_dict = json.loads(content)['floating-ip-pool']
- obj.uuid = floating_ip_pool_dict['uuid']
- if 'parent_uuid' in floating_ip_pool_dict:
- obj.parent_uuid = floating_ip_pool_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end floating_ip_pool_create
-
- def floating_ip_pool_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return floating-ip-pool information.
-
- :param fq_name: Fully qualified name of floating-ip-pool
- :param fq_name_str: Fully qualified name string of floating-ip-pool
- :param id: UUID of floating-ip-pool
- :param ifmap_id: IFMAP id of floating-ip-pool
- :returns: :class:`.FloatingIpPool` object
-
- """
- (args_ok, result) = self._read_args_to_id('floating-ip-pool', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.FloatingIpPool.resource_uri_base['floating-ip-pool'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['floating-ip-pool']
- floating_ip_pool_obj = vnc_api.gen.resource_client.FloatingIpPool.from_dict(**obj_dict)
- floating_ip_pool_obj.clear_pending_updates()
- floating_ip_pool_obj.set_server_conn(self)
-
- return floating_ip_pool_obj
- #end floating_ip_pool_read
-
- def floating_ip_pool_update(self, obj):
- """Update floating-ip-pool.
-
- :param obj: :class:`.FloatingIpPool` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('floating-ip-pool', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"floating-ip-pool":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.FloatingIpPool.resource_uri_base['floating-ip-pool'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('floating-ip-pool', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('floating-ip-pool', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end floating_ip_pool_update
-
- def floating_ip_pools_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all floating-ip-pools.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.FloatingIpPool` objects
-
- """
- return self.resource_list('floating-ip-pool', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end floating_ip_pools_list
-
- def floating_ip_pool_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete floating-ip-pool from the system.
-
- :param fq_name: Fully qualified name of floating-ip-pool
- :param id: UUID of floating-ip-pool
- :param ifmap_id: IFMAP id of floating-ip-pool
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'floating-ip-pool', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.FloatingIpPool.resource_uri_base['floating-ip-pool'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end floating_ip_pool_delete
-
- def get_default_floating_ip_pool_id(self):
- """Return UUID of default floating-ip-pool."""
- return self.fq_name_to_id('floating-ip-pool', vnc_api.gen.resource_client.FloatingIpPool().get_fq_name())
- #end get_default_floating_ip_pool_id
-
- def physical_router_create(self, obj):
- """Create new physical-router.
-
- :param obj: :class:`.PhysicalRouter` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"physical-router":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.PhysicalRouter.create_uri,
- data = json_body)
-
- physical_router_dict = json.loads(content)['physical-router']
- obj.uuid = physical_router_dict['uuid']
- if 'parent_uuid' in physical_router_dict:
- obj.parent_uuid = physical_router_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end physical_router_create
-
- def physical_router_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return physical-router information.
-
- :param fq_name: Fully qualified name of physical-router
- :param fq_name_str: Fully qualified name string of physical-router
- :param id: UUID of physical-router
- :param ifmap_id: IFMAP id of physical-router
- :returns: :class:`.PhysicalRouter` object
-
- """
- (args_ok, result) = self._read_args_to_id('physical-router', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.PhysicalRouter.resource_uri_base['physical-router'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['physical-router']
- physical_router_obj = vnc_api.gen.resource_client.PhysicalRouter.from_dict(**obj_dict)
- physical_router_obj.clear_pending_updates()
- physical_router_obj.set_server_conn(self)
-
- return physical_router_obj
- #end physical_router_read
-
- def physical_router_update(self, obj):
- """Update physical-router.
-
- :param obj: :class:`.PhysicalRouter` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('physical-router', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"physical-router":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.PhysicalRouter.resource_uri_base['physical-router'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('physical-router', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('physical-router', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end physical_router_update
-
- def physical_routers_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all physical-routers.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.PhysicalRouter` objects
-
- """
- return self.resource_list('physical-router', parent_id = parent_id, parent_fq_name = parent_fq_name, back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end physical_routers_list
-
- def physical_router_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete physical-router from the system.
-
- :param fq_name: Fully qualified name of physical-router
- :param id: UUID of physical-router
- :param ifmap_id: IFMAP id of physical-router
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'physical-router', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.PhysicalRouter.resource_uri_base['physical-router'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end physical_router_delete
-
- def get_default_physical_router_id(self):
- """Return UUID of default physical-router."""
- return self.fq_name_to_id('physical-router', vnc_api.gen.resource_client.PhysicalRouter().get_fq_name())
- #end get_default_physical_router_id
-
- def bgp_router_create(self, obj):
- """Create new bgp-router.
-
- :param obj: :class:`.BgpRouter` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"bgp-router":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.BgpRouter.create_uri,
- data = json_body)
-
- bgp_router_dict = json.loads(content)['bgp-router']
- obj.uuid = bgp_router_dict['uuid']
- if 'parent_uuid' in bgp_router_dict:
- obj.parent_uuid = bgp_router_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end bgp_router_create
-
- def bgp_router_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return bgp-router information.
-
- :param fq_name: Fully qualified name of bgp-router
- :param fq_name_str: Fully qualified name string of bgp-router
- :param id: UUID of bgp-router
- :param ifmap_id: IFMAP id of bgp-router
- :returns: :class:`.BgpRouter` object
-
- """
- (args_ok, result) = self._read_args_to_id('bgp-router', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.BgpRouter.resource_uri_base['bgp-router'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['bgp-router']
- bgp_router_obj = vnc_api.gen.resource_client.BgpRouter.from_dict(**obj_dict)
- bgp_router_obj.clear_pending_updates()
- bgp_router_obj.set_server_conn(self)
-
- return bgp_router_obj
- #end bgp_router_read
-
- def bgp_router_update(self, obj):
- """Update bgp-router.
-
- :param obj: :class:`.BgpRouter` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('bgp-router', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"bgp-router":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.BgpRouter.resource_uri_base['bgp-router'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('bgp-router', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('bgp-router', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end bgp_router_update
-
- def bgp_routers_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all bgp-routers.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.BgpRouter` objects
-
- """
- return self.resource_list('bgp-router', parent_id = parent_id, parent_fq_name = parent_fq_name, back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end bgp_routers_list
-
- def bgp_router_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete bgp-router from the system.
-
- :param fq_name: Fully qualified name of bgp-router
- :param id: UUID of bgp-router
- :param ifmap_id: IFMAP id of bgp-router
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'bgp-router', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.BgpRouter.resource_uri_base['bgp-router'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end bgp_router_delete
-
- def get_default_bgp_router_id(self):
- """Return UUID of default bgp-router."""
- return self.fq_name_to_id('bgp-router', vnc_api.gen.resource_client.BgpRouter().get_fq_name())
- #end get_default_bgp_router_id
-
- def virtual_router_create(self, obj):
- """Create new virtual-router.
-
- :param obj: :class:`.VirtualRouter` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"virtual-router":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.VirtualRouter.create_uri,
- data = json_body)
-
- virtual_router_dict = json.loads(content)['virtual-router']
- obj.uuid = virtual_router_dict['uuid']
- if 'parent_uuid' in virtual_router_dict:
- obj.parent_uuid = virtual_router_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end virtual_router_create
-
- def virtual_router_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return virtual-router information.
-
- :param fq_name: Fully qualified name of virtual-router
- :param fq_name_str: Fully qualified name string of virtual-router
- :param id: UUID of virtual-router
- :param ifmap_id: IFMAP id of virtual-router
- :returns: :class:`.VirtualRouter` object
-
- """
- (args_ok, result) = self._read_args_to_id('virtual-router', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.VirtualRouter.resource_uri_base['virtual-router'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['virtual-router']
- virtual_router_obj = vnc_api.gen.resource_client.VirtualRouter.from_dict(**obj_dict)
- virtual_router_obj.clear_pending_updates()
- virtual_router_obj.set_server_conn(self)
-
- return virtual_router_obj
- #end virtual_router_read
-
- def virtual_router_update(self, obj):
- """Update virtual-router.
-
- :param obj: :class:`.VirtualRouter` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('virtual-router', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"virtual-router":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.VirtualRouter.resource_uri_base['virtual-router'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('virtual-router', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('virtual-router', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end virtual_router_update
-
- def virtual_routers_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all virtual-routers.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: Fully qualified name of parent as optional search filter
- :returns: list of :class:`.VirtualRouter` objects
-
- """
- return self.resource_list('virtual-router', parent_id = parent_id, parent_fq_name = parent_fq_name, back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end virtual_routers_list
-
- def virtual_router_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete virtual-router from the system.
-
- :param fq_name: Fully qualified name of virtual-router
- :param id: UUID of virtual-router
- :param ifmap_id: IFMAP id of virtual-router
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'virtual-router', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.VirtualRouter.resource_uri_base['virtual-router'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end virtual_router_delete
-
- def get_default_virtual_router_id(self):
- """Return UUID of default virtual-router."""
- return self.fq_name_to_id('virtual-router', vnc_api.gen.resource_client.VirtualRouter().get_fq_name())
-    #end get_default_virtual_router_id
-
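For orientation, a hedged end-to-end sketch of how the virtual-router helpers above are typically driven through a VncApi connection. The endpoint address, credentials and object parameters below are illustrative assumptions, and the calls require a reachable api-server:

from vnc_api import vnc_api

# assumed endpoint and credentials; adjust for a real deployment
client = vnc_api.VncApi(username='admin', password='secret',
                        tenant_name='admin',
                        api_server_host='127.0.0.1', api_server_port='8082')

vrouter = vnc_api.VirtualRouter(name='compute-1',
                                virtual_router_ip_address='10.0.0.5')
uuid = client.virtual_router_create(vrouter)     # POST /virtual-routers
vrouter = client.virtual_router_read(id=uuid)    # GET /virtual-router/<uuid>
client.virtual_router_update(vrouter)            # PUT /virtual-router/<uuid>
client.virtual_router_delete(id=uuid)            # DELETE /virtual-router/<uuid>
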
- def config_root_create(self, obj):
- """Create new config-root.
-
- :param obj: :class:`.ConfigRoot` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"config-root":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.ConfigRoot.create_uri,
- data = json_body)
-
- config_root_dict = json.loads(content)['config-root']
- obj.uuid = config_root_dict['uuid']
- if 'parent_uuid' in config_root_dict:
- obj.parent_uuid = config_root_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end config_root_create
-
- def config_root_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return config-root information.
-
- :param fq_name: Fully qualified name of config-root
- :param fq_name_str: Fully qualified name string of config-root
- :param id: UUID of config-root
- :param ifmap_id: IFMAP id of config-root
- :returns: :class:`.ConfigRoot` object
-
- """
- (args_ok, result) = self._read_args_to_id('config-root', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.ConfigRoot.resource_uri_base['config-root'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['config-root']
- config_root_obj = vnc_api.gen.resource_client.ConfigRoot.from_dict(**obj_dict)
- config_root_obj.clear_pending_updates()
- config_root_obj.set_server_conn(self)
-
- return config_root_obj
- #end config_root_read
-
- def config_root_update(self, obj):
- """Update config-root.
-
- :param obj: :class:`.ConfigRoot` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('config-root', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"config-root":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.ConfigRoot.resource_uri_base['config-root'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('config-root', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('config-root', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end config_root_update
-
- def config_roots_list(self, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all config-roots."""
- return self.resource_list('config-root', obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end config_roots_list
-
- def config_root_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete config-root from the system.
-
- :param fq_name: Fully qualified name of config-root
- :param id: UUID of config-root
- :param ifmap_id: IFMAP id of config-root
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'config-root', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.ConfigRoot.resource_uri_base['config-root'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end config_root_delete
-
- def get_default_config_root_id(self):
- """Return UUID of default config-root."""
- return self.fq_name_to_id('config-root', vnc_api.gen.resource_client.ConfigRoot().get_fq_name())
-    #end get_default_config_root_id
-
- def subnet_create(self, obj):
- """Create new subnet.
-
- :param obj: :class:`.Subnet` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"subnet":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.Subnet.create_uri,
- data = json_body)
-
- subnet_dict = json.loads(content)['subnet']
- obj.uuid = subnet_dict['uuid']
- if 'parent_uuid' in subnet_dict:
- obj.parent_uuid = subnet_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end subnet_create
-
- def subnet_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return subnet information.
-
- :param fq_name: Fully qualified name of subnet
- :param fq_name_str: Fully qualified name string of subnet
- :param id: UUID of subnet
- :param ifmap_id: IFMAP id of subnet
- :returns: :class:`.Subnet` object
-
- """
- (args_ok, result) = self._read_args_to_id('subnet', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.Subnet.resource_uri_base['subnet'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['subnet']
- subnet_obj = vnc_api.gen.resource_client.Subnet.from_dict(**obj_dict)
- subnet_obj.clear_pending_updates()
- subnet_obj.set_server_conn(self)
-
- return subnet_obj
- #end subnet_read
-
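The fields handling in subnet_read is shared by every *_read method in this file: an explicit field list is sent as a comma-separated 'fields' query parameter, otherwise back-references and children are excluded from the response. A small sketch of that selection logic, using a hypothetical helper name and illustrative field names:

def build_read_query_params(fields=None):
    """Mirror the fields/exclude logic used by the *_read methods."""
    if fields:
        return {'fields': ','.join(fields)}
    return {'exclude_back_refs': True, 'exclude_children': True}

print(build_read_query_params(['display_name', 'id_perms']))
# {'fields': 'display_name,id_perms'}
print(build_read_query_params())
# {'exclude_back_refs': True, 'exclude_children': True}
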
- def subnet_update(self, obj):
- """Update subnet.
-
- :param obj: :class:`.Subnet` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('subnet', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"subnet":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.Subnet.resource_uri_base['subnet'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('subnet', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('subnet', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end subnet_update
-
- def subnets_list(self, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all subnets."""
- return self.resource_list('subnet', back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end subnets_list
-
- def subnet_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete subnet from the system.
-
- :param fq_name: Fully qualified name of subnet
- :param id: UUID of subnet
- :param ifmap_id: IFMAP id of subnet
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'subnet', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.Subnet.resource_uri_base['subnet'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end subnet_delete
-
- def get_default_subnet_id(self):
- """Return UUID of default subnet."""
- return self.fq_name_to_id('subnet', vnc_api.gen.resource_client.Subnet().get_fq_name())
-    #end get_default_subnet_id
-
- def global_system_config_create(self, obj):
- """Create new global-system-config.
-
- :param obj: :class:`.GlobalSystemConfig` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"global-system-config":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.GlobalSystemConfig.create_uri,
- data = json_body)
-
- global_system_config_dict = json.loads(content)['global-system-config']
- obj.uuid = global_system_config_dict['uuid']
- if 'parent_uuid' in global_system_config_dict:
- obj.parent_uuid = global_system_config_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end global_system_config_create
-
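Every *_create and *_update call above builds its request body the same way: the object is serialized through self._obj_serializer, which drops None-valued fields, and the result is wrapped under its resource-type key. A self-contained sketch of that pattern, using a stand-in object and serializer rather than the real generated classes:

import json

class _FakeGlobalSystemConfig(object):
    """Illustrative stand-in for the generated resource class."""
    def __init__(self):
        self.name = 'default-global-system-config'
        self.autonomous_system = 64512
        self.display_name = None   # None fields are dropped by the serializer

def _obj_serializer(obj):
    # mimic "Ignore fields with None value in json representation"
    return dict((k, v) for k, v in vars(obj).items() if v is not None)

json_param = json.dumps(_FakeGlobalSystemConfig(), default=_obj_serializer)
json_body = '{"global-system-config":' + json_param + '}'
print(json_body)
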
- def global_system_config_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return global-system-config information.
-
- :param fq_name: Fully qualified name of global-system-config
- :param fq_name_str: Fully qualified name string of global-system-config
- :param id: UUID of global-system-config
- :param ifmap_id: IFMAP id of global-system-config
- :returns: :class:`.GlobalSystemConfig` object
-
- """
- (args_ok, result) = self._read_args_to_id('global-system-config', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.GlobalSystemConfig.resource_uri_base['global-system-config'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['global-system-config']
- global_system_config_obj = vnc_api.gen.resource_client.GlobalSystemConfig.from_dict(**obj_dict)
- global_system_config_obj.clear_pending_updates()
- global_system_config_obj.set_server_conn(self)
-
- return global_system_config_obj
- #end global_system_config_read
-
- def global_system_config_update(self, obj):
- """Update global-system-config.
-
- :param obj: :class:`.GlobalSystemConfig` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('global-system-config', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"global-system-config":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.GlobalSystemConfig.resource_uri_base['global-system-config'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('global-system-config', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('global-system-config', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end global_system_config_update
-
- def global_system_configs_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all global-system-configs.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: Fully qualified name of parent as optional search filter
- :returns: list of :class:`.GlobalSystemConfig` objects
-
- """
- return self.resource_list('global-system-config', parent_id = parent_id, parent_fq_name = parent_fq_name, back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end global_system_configs_list
-
- def global_system_config_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete global-system-config from the system.
-
- :param fq_name: Fully qualified name of global-system-config
- :param id: UUID of global-system-config
- :param ifmap_id: IFMAP id of global-system-config
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'global-system-config', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.GlobalSystemConfig.resource_uri_base['global-system-config'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end global_system_config_delete
-
- def get_default_global_system_config_id(self):
- """Return UUID of default global-system-config."""
- return self.fq_name_to_id('global-system-config', vnc_api.gen.resource_client.GlobalSystemConfig().get_fq_name())
-    #end get_default_global_system_config_id
-
- def service_appliance_create(self, obj):
- """Create new service-appliance.
-
- :param obj: :class:`.ServiceAppliance` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"service-appliance":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.ServiceAppliance.create_uri,
- data = json_body)
-
- service_appliance_dict = json.loads(content)['service-appliance']
- obj.uuid = service_appliance_dict['uuid']
- if 'parent_uuid' in service_appliance_dict:
- obj.parent_uuid = service_appliance_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end service_appliance_create
-
- def service_appliance_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return service-appliance information.
-
- :param fq_name: Fully qualified name of service-appliance
- :param fq_name_str: Fully qualified name string of service-appliance
- :param id: UUID of service-appliance
- :param ifmap_id: IFMAP id of service-appliance
- :returns: :class:`.ServiceAppliance` object
-
- """
- (args_ok, result) = self._read_args_to_id('service-appliance', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.ServiceAppliance.resource_uri_base['service-appliance'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['service-appliance']
- service_appliance_obj = vnc_api.gen.resource_client.ServiceAppliance.from_dict(**obj_dict)
- service_appliance_obj.clear_pending_updates()
- service_appliance_obj.set_server_conn(self)
-
- return service_appliance_obj
- #end service_appliance_read
-
- def service_appliance_update(self, obj):
- """Update service-appliance.
-
- :param obj: :class:`.ServiceAppliance` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('service-appliance', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"service-appliance":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.ServiceAppliance.resource_uri_base['service-appliance'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('service-appliance', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('service-appliance', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end service_appliance_update
-
- def service_appliances_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all service-appliances.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: Fully qualified name of parent as optional search filter
- :returns: list of :class:`.ServiceAppliance` objects
-
- """
- return self.resource_list('service-appliance', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end service_appliances_list
-
- def service_appliance_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete service-appliance from the system.
-
- :param fq_name: Fully qualified name of service-appliance
- :param id: UUID of service-appliance
- :param ifmap_id: IFMAP id of service-appliance
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'service-appliance', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.ServiceAppliance.resource_uri_base['service-appliance'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end service_appliance_delete
-
- def get_default_service_appliance_id(self):
- """Return UUID of default service-appliance."""
- return self.fq_name_to_id('service-appliance', vnc_api.gen.resource_client.ServiceAppliance().get_fq_name())
-    #end get_default_service_appliance_id
-
- def service_instance_create(self, obj):
- """Create new service-instance.
-
- :param obj: :class:`.ServiceInstance` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"service-instance":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.ServiceInstance.create_uri,
- data = json_body)
-
- service_instance_dict = json.loads(content)['service-instance']
- obj.uuid = service_instance_dict['uuid']
- if 'parent_uuid' in service_instance_dict:
- obj.parent_uuid = service_instance_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end service_instance_create
-
- def service_instance_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return service-instance information.
-
- :param fq_name: Fully qualified name of service-instance
- :param fq_name_str: Fully qualified name string of service-instance
- :param id: UUID of service-instance
- :param ifmap_id: IFMAP id of service-instance
- :returns: :class:`.ServiceInstance` object
-
- """
- (args_ok, result) = self._read_args_to_id('service-instance', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.ServiceInstance.resource_uri_base['service-instance'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['service-instance']
- service_instance_obj = vnc_api.gen.resource_client.ServiceInstance.from_dict(**obj_dict)
- service_instance_obj.clear_pending_updates()
- service_instance_obj.set_server_conn(self)
-
- return service_instance_obj
- #end service_instance_read
-
- def service_instance_update(self, obj):
- """Update service-instance.
-
- :param obj: :class:`.ServiceInstance` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('service-instance', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"service-instance":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.ServiceInstance.resource_uri_base['service-instance'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('service-instance', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('service-instance', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end service_instance_update
-
- def service_instances_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all service-instances.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: Fully qualified name of parent as optional search filter
- :returns: list of :class:`.ServiceInstance` objects
-
- """
- return self.resource_list('service-instance', parent_id = parent_id, parent_fq_name = parent_fq_name, back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end service_instances_list
-
- def service_instance_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete service-instance from the system.
-
- :param fq_name: Fully qualified name of service-instance
- :param id: UUID of service-instance
- :param ifmap_id: IFMAP id of service-instance
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'service-instance', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.ServiceInstance.resource_uri_base['service-instance'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end service_instance_delete
-
- def get_default_service_instance_id(self):
- """Return UUID of default service-instance."""
- return self.fq_name_to_id('service-instance', vnc_api.gen.resource_client.ServiceInstance().get_fq_name())
-    #end get_default_service_instance_id
-
- def namespace_create(self, obj):
- """Create new namespace.
-
- :param obj: :class:`.Namespace` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"namespace":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.Namespace.create_uri,
- data = json_body)
-
- namespace_dict = json.loads(content)['namespace']
- obj.uuid = namespace_dict['uuid']
- if 'parent_uuid' in namespace_dict:
- obj.parent_uuid = namespace_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end namespace_create
-
- def namespace_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return namespace information.
-
- :param fq_name: Fully qualified name of namespace
- :param fq_name_str: Fully qualified name string of namespace
- :param id: UUID of namespace
- :param ifmap_id: IFMAP id of namespace
- :returns: :class:`.Namespace` object
-
- """
- (args_ok, result) = self._read_args_to_id('namespace', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.Namespace.resource_uri_base['namespace'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['namespace']
- namespace_obj = vnc_api.gen.resource_client.Namespace.from_dict(**obj_dict)
- namespace_obj.clear_pending_updates()
- namespace_obj.set_server_conn(self)
-
- return namespace_obj
- #end namespace_read
-
- def namespace_update(self, obj):
- """Update namespace.
-
- :param obj: :class:`.Namespace` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('namespace', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"namespace":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.Namespace.resource_uri_base['namespace'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('namespace', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('namespace', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end namespace_update
-
- def namespaces_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all namespaces.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: Fully qualified name of parent as optional search filter
- :returns: list of :class:`.Namespace` objects
-
- """
- return self.resource_list('namespace', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end namespaces_list
-
- def namespace_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete namespace from the system.
-
- :param fq_name: Fully qualified name of namespace
- :param id: UUID of namespace
- :param ifmap_id: IFMAP id of namespace
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'namespace', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.Namespace.resource_uri_base['namespace'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end namespace_delete
-
- def get_default_namespace_id(self):
- """Return UUID of default namespace."""
- return self.fq_name_to_id('namespace', vnc_api.gen.resource_client.Namespace().get_fq_name())
-    #end get_default_namespace_id
-
- def logical_interface_create(self, obj):
- """Create new logical-interface.
-
- :param obj: :class:`.LogicalInterface` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"logical-interface":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.LogicalInterface.create_uri,
- data = json_body)
-
- logical_interface_dict = json.loads(content)['logical-interface']
- obj.uuid = logical_interface_dict['uuid']
- if 'parent_uuid' in logical_interface_dict:
- obj.parent_uuid = logical_interface_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end logical_interface_create
-
- def logical_interface_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return logical-interface information.
-
- :param fq_name: Fully qualified name of logical-interface
- :param fq_name_str: Fully qualified name string of logical-interface
- :param id: UUID of logical-interface
- :param ifmap_id: IFMAP id of logical-interface
- :returns: :class:`.LogicalInterface` object
-
- """
- (args_ok, result) = self._read_args_to_id('logical-interface', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.LogicalInterface.resource_uri_base['logical-interface'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['logical-interface']
- logical_interface_obj = vnc_api.gen.resource_client.LogicalInterface.from_dict(**obj_dict)
- logical_interface_obj.clear_pending_updates()
- logical_interface_obj.set_server_conn(self)
-
- return logical_interface_obj
- #end logical_interface_read
-
- def logical_interface_update(self, obj):
- """Update logical-interface.
-
- :param obj: :class:`.LogicalInterface` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('logical-interface', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"logical-interface":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.LogicalInterface.resource_uri_base['logical-interface'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('logical-interface', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('logical-interface', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end logical_interface_update
-
- def logical_interfaces_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all logical-interfaces.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: Fully qualified name of parent as optional search filter
- :returns: list of :class:`.LogicalInterface` objects
-
- """
- return self.resource_list('logical-interface', parent_id = parent_id, parent_fq_name = parent_fq_name, back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end logical_interfaces_list
-
- def logical_interface_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete logical-interface from the system.
-
- :param fq_name: Fully qualified name of logical-interface
- :param id: UUID of logical-interface
- :param ifmap_id: IFMAP id of logical-interface
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'logical-interface', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.LogicalInterface.resource_uri_base['logical-interface'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end logical_interface_delete
-
- def get_default_logical_interface_id(self):
- """Return UUID of default logical-interface."""
- return self.fq_name_to_id('logical-interface', vnc_api.gen.resource_client.LogicalInterface().get_fq_name())
-    #end get_default_logical_interface_id
-
- def route_table_create(self, obj):
- """Create new route-table.
-
- :param obj: :class:`.RouteTable` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"route-table":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.RouteTable.create_uri,
- data = json_body)
-
- route_table_dict = json.loads(content)['route-table']
- obj.uuid = route_table_dict['uuid']
- if 'parent_uuid' in route_table_dict:
- obj.parent_uuid = route_table_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end route_table_create
-
- def route_table_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return route-table information.
-
- :param fq_name: Fully qualified name of route-table
- :param fq_name_str: Fully qualified name string of route-table
- :param id: UUID of route-table
- :param ifmap_id: IFMAP id of route-table
- :returns: :class:`.RouteTable` object
-
- """
- (args_ok, result) = self._read_args_to_id('route-table', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.RouteTable.resource_uri_base['route-table'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['route-table']
- route_table_obj = vnc_api.gen.resource_client.RouteTable.from_dict(**obj_dict)
- route_table_obj.clear_pending_updates()
- route_table_obj.set_server_conn(self)
-
- return route_table_obj
- #end route_table_read
-
- def route_table_update(self, obj):
- """Update route-table.
-
- :param obj: :class:`.RouteTable` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('route-table', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"route-table":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.RouteTable.resource_uri_base['route-table'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('route-table', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('route-table', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end route_table_update
-
- def route_tables_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all route-tables.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: Fully qualified name of parent as optional search filter
- :returns: list of :class:`.RouteTable` objects
-
- """
- return self.resource_list('route-table', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end route_tables_list
-
- def route_table_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete route-table from the system.
-
- :param fq_name: Fully qualified name of route-table
- :param id: UUID of route-table
- :param ifmap_id: IFMAP id of route-table
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'route-table', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.RouteTable.resource_uri_base['route-table'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end route_table_delete
-
- def get_default_route_table_id(self):
- """Return UUID of default route-table."""
- return self.fq_name_to_id('route-table', vnc_api.gen.resource_client.RouteTable().get_fq_name())
-    #end get_default_route_table_id
-
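Each get_default_*_id helper above follows the same two steps: instantiate the resource class with no arguments so it carries its type's default fq_name, then resolve that fq_name to a UUID via fq_name_to_id. A hedged sketch for route-table; the printed fq_name and the need for a live api-server connection (the 'client' object) are assumptions:

import vnc_api.gen.resource_client

default_rt = vnc_api.gen.resource_client.RouteTable()
print(default_rt.get_fq_name())
# e.g. ['default-domain', 'default-project', 'default-route-table']
# uuid = client.fq_name_to_id('route-table', default_rt.get_fq_name())
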
- def physical_interface_create(self, obj):
- """Create new physical-interface.
-
- :param obj: :class:`.PhysicalInterface` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"physical-interface":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.PhysicalInterface.create_uri,
- data = json_body)
-
- physical_interface_dict = json.loads(content)['physical-interface']
- obj.uuid = physical_interface_dict['uuid']
- if 'parent_uuid' in physical_interface_dict:
- obj.parent_uuid = physical_interface_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end physical_interface_create
-
- def physical_interface_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return physical-interface information.
-
- :param fq_name: Fully qualified name of physical-interface
- :param fq_name_str: Fully qualified name string of physical-interface
- :param id: UUID of physical-interface
- :param ifmap_id: IFMAP id of physical-interface
- :returns: :class:`.PhysicalInterface` object
-
- """
- (args_ok, result) = self._read_args_to_id('physical-interface', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.PhysicalInterface.resource_uri_base['physical-interface'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['physical-interface']
- physical_interface_obj = vnc_api.gen.resource_client.PhysicalInterface.from_dict(**obj_dict)
- physical_interface_obj.clear_pending_updates()
- physical_interface_obj.set_server_conn(self)
-
- return physical_interface_obj
- #end physical_interface_read
-
- def physical_interface_update(self, obj):
- """Update physical-interface.
-
- :param obj: :class:`.PhysicalInterface` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('physical-interface', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"physical-interface":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.PhysicalInterface.resource_uri_base['physical-interface'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('physical-interface', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('physical-interface', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end physical_interface_update
-
- def physical_interfaces_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all physical-interfaces.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: Fully qualified name of parent as optional search filter
- :returns: list of :class:`.PhysicalInterface` objects
-
- """
- return self.resource_list('physical-interface', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end physical_interfaces_list
-
- def physical_interface_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete physical-interface from the system.
-
- :param fq_name: Fully qualified name of physical-interface
- :param id: UUID of physical-interface
- :param ifmap_id: IFMAP id of physical-interface
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'physical-interface', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.PhysicalInterface.resource_uri_base['physical-interface'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end physical_interface_delete
-
- def get_default_physical_interface_id(self):
- """Return UUID of default physical-interface."""
- return self.fq_name_to_id('physical-interface', vnc_api.gen.resource_client.PhysicalInterface().get_fq_name())
-    #end get_default_physical_interface_id
-
- def access_control_list_create(self, obj):
- """Create new access-control-list.
-
- :param obj: :class:`.AccessControlList` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"access-control-list":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.AccessControlList.create_uri,
- data = json_body)
-
- access_control_list_dict = json.loads(content)['access-control-list']
- obj.uuid = access_control_list_dict['uuid']
- if 'parent_uuid' in access_control_list_dict:
- obj.parent_uuid = access_control_list_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end access_control_list_create
-
- def access_control_list_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return access-control-list information.
-
- :param fq_name: Fully qualified name of access-control-list
- :param fq_name_str: Fully qualified name string of access-control-list
- :param id: UUID of access-control-list
- :param ifmap_id: IFMAP id of access-control-list
- :returns: :class:`.AccessControlList` object
-
- """
- (args_ok, result) = self._read_args_to_id('access-control-list', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.AccessControlList.resource_uri_base['access-control-list'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['access-control-list']
- access_control_list_obj = vnc_api.gen.resource_client.AccessControlList.from_dict(**obj_dict)
- access_control_list_obj.clear_pending_updates()
- access_control_list_obj.set_server_conn(self)
-
- return access_control_list_obj
- #end access_control_list_read
-
- def access_control_list_update(self, obj):
- """Update access-control-list.
-
- :param obj: :class:`.AccessControlList` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('access-control-list', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"access-control-list":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.AccessControlList.resource_uri_base['access-control-list'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('access-control-list', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('access-control-list', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end access_control_list_update
-
- def access_control_lists_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all access-control-lists.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: Fully qualified name of parent as optional search filter
- :returns: list of :class:`.AccessControlList` objects
-
- """
- return self.resource_list('access-control-list', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end access_control_lists_list
-
- def access_control_list_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete access-control-list from the system.
-
- :param fq_name: Fully qualified name of access-control-list
- :param id: UUID of access-control-list
- :param ifmap_id: IFMAP id of access-control-list
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'access-control-list', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.AccessControlList.resource_uri_base['access-control-list'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end access_control_list_delete
-
- def get_default_access_control_list_id(self):
- """Return UUID of default access-control-list."""
- return self.fq_name_to_id('access-control-list', vnc_api.gen.resource_client.AccessControlList().get_fq_name())
-    #end get_default_access_control_list_id
-
- def analytics_node_create(self, obj):
- """Create new analytics-node.
-
- :param obj: :class:`.AnalyticsNode` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"analytics-node":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.AnalyticsNode.create_uri,
- data = json_body)
-
- analytics_node_dict = json.loads(content)['analytics-node']
- obj.uuid = analytics_node_dict['uuid']
- if 'parent_uuid' in analytics_node_dict:
- obj.parent_uuid = analytics_node_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end analytics_node_create
-
- def analytics_node_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return analytics-node information.
-
- :param fq_name: Fully qualified name of analytics-node
- :param fq_name_str: Fully qualified name string of analytics-node
- :param id: UUID of analytics-node
- :param ifmap_id: IFMAP id of analytics-node
- :returns: :class:`.AnalyticsNode` object
-
- """
- (args_ok, result) = self._read_args_to_id('analytics-node', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.AnalyticsNode.resource_uri_base['analytics-node'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['analytics-node']
- analytics_node_obj = vnc_api.gen.resource_client.AnalyticsNode.from_dict(**obj_dict)
- analytics_node_obj.clear_pending_updates()
- analytics_node_obj.set_server_conn(self)
-
- return analytics_node_obj
- #end analytics_node_read
-
- def analytics_node_update(self, obj):
- """Update analytics-node.
-
- :param obj: :class:`.AnalyticsNode` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('analytics-node', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"analytics-node":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.AnalyticsNode.resource_uri_base['analytics-node'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('analytics-node', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('analytics-node', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end analytics_node_update
-
- def analytics_nodes_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all analytics-nodes.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: Fully qualified name of parent as optional search filter
- :returns: list of :class:`.AnalyticsNode` objects
-
- """
- return self.resource_list('analytics-node', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end analytics_nodes_list
-
- def analytics_node_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete analytics-node from the system.
-
- :param fq_name: Fully qualified name of analytics-node
- :param id: UUID of analytics-node
- :param ifmap_id: IFMAP id of analytics-node
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'analytics-node', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.AnalyticsNode.resource_uri_base['analytics-node'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end analytics_node_delete
-
- def get_default_analytics_node_id(self):
- """Return UUID of default analytics-node."""
- return self.fq_name_to_id('analytics-node', vnc_api.gen.resource_client.AnalyticsNode().get_fq_name())
-    #end get_default_analytics_node_id
-
- def virtual_DNS_create(self, obj):
- """Create new virtual-DNS.
-
- :param obj: :class:`.VirtualDns` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"virtual-DNS":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.VirtualDns.create_uri,
- data = json_body)
-
- virtual_DNS_dict = json.loads(content)['virtual-DNS']
- obj.uuid = virtual_DNS_dict['uuid']
- if 'parent_uuid' in virtual_DNS_dict:
- obj.parent_uuid = virtual_DNS_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end virtual_DNS_create
-
- def virtual_DNS_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return virtual-DNS information.
-
- :param fq_name: Fully qualified name of virtual-DNS
- :param fq_name_str: Fully qualified name string of virtual-DNS
- :param id: UUID of virtual-DNS
- :param ifmap_id: IFMAP id of virtual-DNS
- :returns: :class:`.VirtualDns` object
-
- """
- (args_ok, result) = self._read_args_to_id('virtual-DNS', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.VirtualDns.resource_uri_base['virtual-DNS'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['virtual-DNS']
- virtual_DNS_obj = vnc_api.gen.resource_client.VirtualDns.from_dict(**obj_dict)
- virtual_DNS_obj.clear_pending_updates()
- virtual_DNS_obj.set_server_conn(self)
-
- return virtual_DNS_obj
- #end virtual_DNS_read
-
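Editorial note: on reads, passing fields sends a comma-joined 'fields' query parameter; omitting it makes the GET exclude back references and children. A sketch reusing the client connection from the earlier note (FQ name and field names are illustrative):

    # Partial read: only the named properties are requested.
    vdns = client.virtual_DNS_read(
        fq_name=['default-domain', 'example-dns'],
        fields=['virtual_DNS_data', 'id_perms'])

    # Default read: no fields given, so exclude_back_refs/exclude_children are
    # added and back references and child objects are not fetched.
    vdns = client.virtual_DNS_read(fq_name=['default-domain', 'example-dns'])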
- def virtual_DNS_update(self, obj):
- """Update virtual-DNS.
-
- :param obj: :class:`.VirtualDns` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('virtual-DNS', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"virtual-DNS":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.VirtualDns.resource_uri_base['virtual-DNS'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('virtual-DNS', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('virtual-DNS', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end virtual_DNS_update
-
- def virtual_DNSs_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all virtual-DNSs.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.VirtualDns` objects
-
- """
- return self.resource_list('virtual-DNS', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end virtual_DNSs_list
-
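Editorial note: the list call delegates to resource_list with the same filters. A sketch of the common parent-scoped variants, with placeholder names and semantics as commonly documented for resource_list:

    # Summary listing scoped to a parent domain.
    summaries = client.virtual_DNSs_list(parent_fq_name=['default-domain'])

    # detail=True returns full VirtualDns objects instead of summaries;
    # count=True asks only for the number of matching records.
    objs = client.virtual_DNSs_list(parent_fq_name=['default-domain'], detail=True)
    total = client.virtual_DNSs_list(parent_fq_name=['default-domain'], count=True)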
- def virtual_DNS_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete virtual-DNS from the system.
-
- :param fq_name: Fully qualified name of virtual-DNS
- :param id: UUID of virtual-DNS
- :param ifmap_id: IFMAP id of virtual-DNS
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'virtual-DNS', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.VirtualDns.resource_uri_base['virtual-DNS'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end virtual_DNS_delete
-
- def get_default_virtual_DNS_id(self):
- """Return UUID of default virtual-DNS."""
- return self.fq_name_to_id('virtual-DNS', vnc_api.gen.resource_client.VirtualDns().get_fq_name())
- #end get_default_virtual_DNS_id
-
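Editorial note: deletion and default-object lookup round out the per-type API; both resolve names through the same helpers used above. A sketch with a placeholder FQ name:

    # Delete by FQ name or by UUID; _read_args_to_id resolves whichever is given.
    client.virtual_DNS_delete(fq_name=['default-domain', 'example-dns'])

    # UUID of the default virtual-DNS, resolved via fq_name_to_id().
    default_vdns_uuid = client.get_default_virtual_DNS_id()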
- def customer_attachment_create(self, obj):
- """Create new customer-attachment.
-
- :param obj: :class:`.CustomerAttachment` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"customer-attachment":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.CustomerAttachment.create_uri,
- data = json_body)
-
- customer_attachment_dict = json.loads(content)['customer-attachment']
- obj.uuid = customer_attachment_dict['uuid']
- if 'parent_uuid' in customer_attachment_dict:
- obj.parent_uuid = customer_attachment_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end customer_attachment_create
-
- def customer_attachment_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return customer-attachment information.
-
- :param fq_name: Fully qualified name of customer-attachment
- :param fq_name_str: Fully qualified name string of customer-attachment
- :param id: UUID of customer-attachment
- :param ifmap_id: IFMAP id of customer-attachment
- :returns: :class:`.CustomerAttachment` object
-
- """
- (args_ok, result) = self._read_args_to_id('customer-attachment', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.CustomerAttachment.resource_uri_base['customer-attachment'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['customer-attachment']
- customer_attachment_obj = vnc_api.gen.resource_client.CustomerAttachment.from_dict(**obj_dict)
- customer_attachment_obj.clear_pending_updates()
- customer_attachment_obj.set_server_conn(self)
-
- return customer_attachment_obj
- #end customer_attachment_read
-
- def customer_attachment_update(self, obj):
- """Update customer-attachment.
-
- :param obj: :class:`.CustomerAttachment` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('customer-attachment', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"customer-attachment":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.CustomerAttachment.resource_uri_base['customer-attachment'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('customer-attachment', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('customer-attachment', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end customer_attachment_update
-
- def customer_attachments_list(self, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all customer-attachments."""
- return self.resource_list('customer-attachment', back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end customer_attachments_list
-
- def customer_attachment_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete customer-attachment from the system.
-
- :param fq_name: Fully qualified name of customer-attachment
- :param id: UUID of customer-attachment
- :param ifmap_id: IFMAP id of customer-attachment
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'customer-attachment', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.CustomerAttachment.resource_uri_base['customer-attachment'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end customer_attachment_delete
-
- def get_default_customer_attachment_id(self):
- """Return UUID of default customer-attachment."""
- return self.fq_name_to_id('customer-attachment', vnc_api.gen.resource_client.CustomerAttachment().get_fq_name())
- #end get_default_customer_attachment_id
-
- def service_appliance_set_create(self, obj):
- """Create new service-appliance-set.
-
- :param obj: :class:`.ServiceApplianceSet` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"service-appliance-set":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.ServiceApplianceSet.create_uri,
- data = json_body)
-
- service_appliance_set_dict = json.loads(content)['service-appliance-set']
- obj.uuid = service_appliance_set_dict['uuid']
- if 'parent_uuid' in service_appliance_set_dict:
- obj.parent_uuid = service_appliance_set_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end service_appliance_set_create
-
- def service_appliance_set_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return service-appliance-set information.
-
- :param fq_name: Fully qualified name of service-appliance-set
- :param fq_name_str: Fully qualified name string of service-appliance-set
- :param id: UUID of service-appliance-set
- :param ifmap_id: IFMAP id of service-appliance-set
- :returns: :class:`.ServiceApplianceSet` object
-
- """
- (args_ok, result) = self._read_args_to_id('service-appliance-set', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.ServiceApplianceSet.resource_uri_base['service-appliance-set'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['service-appliance-set']
- service_appliance_set_obj = vnc_api.gen.resource_client.ServiceApplianceSet.from_dict(**obj_dict)
- service_appliance_set_obj.clear_pending_updates()
- service_appliance_set_obj.set_server_conn(self)
-
- return service_appliance_set_obj
- #end service_appliance_set_read
-
- def service_appliance_set_update(self, obj):
- """Update service-appliance-set.
-
- :param obj: :class:`.ServiceApplianceSet` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('service-appliance-set', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"service-appliance-set":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.ServiceApplianceSet.resource_uri_base['service-appliance-set'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('service-appliance-set', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('service-appliance-set', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end service_appliance_set_update
-
- def service_appliance_sets_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all service-appliance-sets.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.ServiceApplianceSet` objects
-
- """
- return self.resource_list('service-appliance-set', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end service_appliance_sets_list
-
- def service_appliance_set_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete service-appliance-set from the system.
-
- :param fq_name: Fully qualified name of service-appliance-set
- :param id: UUID of service-appliance-set
- :param ifmap_id: IFMAP id of service-appliance-set
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'service-appliance-set', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.ServiceApplianceSet.resource_uri_base['service-appliance-set'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end service_appliance_set_delete
-
- def get_default_service_appliance_set_id(self):
- """Return UUID of default service-appliance-set."""
- return self.fq_name_to_id('service-appliance-set', vnc_api.gen.resource_client.ServiceApplianceSet().get_fq_name())
- #end get_default_service_appliance_set_id
-
- def config_node_create(self, obj):
- """Create new config-node.
-
- :param obj: :class:`.ConfigNode` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"config-node":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.ConfigNode.create_uri,
- data = json_body)
-
- config_node_dict = json.loads(content)['config-node']
- obj.uuid = config_node_dict['uuid']
- if 'parent_uuid' in config_node_dict:
- obj.parent_uuid = config_node_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end config_node_create
-
- def config_node_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return config-node information.
-
- :param fq_name: Fully qualified name of config-node
- :param fq_name_str: Fully qualified name string of config-node
- :param id: UUID of config-node
- :param ifmap_id: IFMAP id of config-node
- :returns: :class:`.ConfigNode` object
-
- """
- (args_ok, result) = self._read_args_to_id('config-node', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.ConfigNode.resource_uri_base['config-node'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['config-node']
- config_node_obj = vnc_api.gen.resource_client.ConfigNode.from_dict(**obj_dict)
- config_node_obj.clear_pending_updates()
- config_node_obj.set_server_conn(self)
-
- return config_node_obj
- #end config_node_read
-
- def config_node_update(self, obj):
- """Update config-node.
-
- :param obj: :class:`.ConfigNode` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('config-node', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"config-node":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.ConfigNode.resource_uri_base['config-node'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('config-node', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('config-node', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end config_node_update
-
- def config_nodes_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all config-nodes.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.ConfigNode` objects
-
- """
- return self.resource_list('config-node', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end config_nodes_list
-
- def config_node_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete config-node from the system.
-
- :param fq_name: Fully qualified name of config-node
- :param id: UUID of config-node
- :param ifmap_id: IFMAP id of config-node
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'config-node', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.ConfigNode.resource_uri_base['config-node'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end config_node_delete
-
- def get_default_config_node_id(self):
- """Return UUID of default config-node."""
- return self.fq_name_to_id('config-node', vnc_api.gen.resource_client.ConfigNode().get_fq_name())
- #end get_default_config_node_id
-
- def qos_queue_create(self, obj):
- """Create new qos-queue.
-
- :param obj: :class:`.QosQueue` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"qos-queue":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.QosQueue.create_uri,
- data = json_body)
-
- qos_queue_dict = json.loads(content)['qos-queue']
- obj.uuid = qos_queue_dict['uuid']
- if 'parent_uuid' in qos_queue_dict:
- obj.parent_uuid = qos_queue_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end qos_queue_create
-
- def qos_queue_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return qos-queue information.
-
- :param fq_name: Fully qualified name of qos-queue
- :param fq_name_str: Fully qualified name string of qos-queue
- :param id: UUID of qos-queue
- :param ifmap_id: IFMAP id of qos-queue
- :returns: :class:`.QosQueue` object
-
- """
- (args_ok, result) = self._read_args_to_id('qos-queue', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.QosQueue.resource_uri_base['qos-queue'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['qos-queue']
- qos_queue_obj = vnc_api.gen.resource_client.QosQueue.from_dict(**obj_dict)
- qos_queue_obj.clear_pending_updates()
- qos_queue_obj.set_server_conn(self)
-
- return qos_queue_obj
- #end qos_queue_read
-
- def qos_queue_update(self, obj):
- """Update qos-queue.
-
- :param obj: :class:`.QosQueue` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('qos-queue', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"qos-queue":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.QosQueue.resource_uri_base['qos-queue'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('qos-queue', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('qos-queue', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end qos_queue_update
-
- def qos_queues_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all qos-queues.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.QosQueue` objects
-
- """
- return self.resource_list('qos-queue', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end qos_queues_list
-
- def qos_queue_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete qos-queue from the system.
-
- :param fq_name: Fully qualified name of qos-queue
- :param id: UUID of qos-queue
- :param ifmap_id: IFMAP id of qos-queue
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'qos-queue', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.QosQueue.resource_uri_base['qos-queue'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end qos_queue_delete
-
- def get_default_qos_queue_id(self):
- """Return UUID of default qos-queue."""
- return self.fq_name_to_id('qos-queue', vnc_api.gen.resource_client.QosQueue().get_fq_name())
- #end get_default_qos_queue_id
-
- def virtual_machine_create(self, obj):
- """Create new virtual-machine.
-
- :param obj: :class:`.VirtualMachine` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"virtual-machine":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.VirtualMachine.create_uri,
- data = json_body)
-
- virtual_machine_dict = json.loads(content)['virtual-machine']
- obj.uuid = virtual_machine_dict['uuid']
- if 'parent_uuid' in virtual_machine_dict:
- obj.parent_uuid = virtual_machine_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end virtual_machine_create
-
- def virtual_machine_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return virtual-machine information.
-
- :param fq_name: Fully qualified name of virtual-machine
- :param fq_name_str: Fully qualified name string of virtual-machine
- :param id: UUID of virtual-machine
- :param ifmap_id: IFMAP id of virtual-machine
- :returns: :class:`.VirtualMachine` object
-
- """
- (args_ok, result) = self._read_args_to_id('virtual-machine', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.VirtualMachine.resource_uri_base['virtual-machine'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['virtual-machine']
- virtual_machine_obj = vnc_api.gen.resource_client.VirtualMachine.from_dict(**obj_dict)
- virtual_machine_obj.clear_pending_updates()
- virtual_machine_obj.set_server_conn(self)
-
- return virtual_machine_obj
- #end virtual_machine_read
-
- def virtual_machine_update(self, obj):
- """Update virtual-machine.
-
- :param obj: :class:`.VirtualMachine` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('virtual-machine', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"virtual-machine":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.VirtualMachine.resource_uri_base['virtual-machine'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('virtual-machine', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('virtual-machine', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end virtual_machine_update
-
- def virtual_machines_list(self, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all virtual-machines."""
- return self.resource_list('virtual-machine', back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end virtual_machines_list
-
- def virtual_machine_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete virtual-machine from the system.
-
- :param fq_name: Fully qualified name of virtual-machine
- :param id: UUID of virtual-machine
- :param ifmap_id: IFMAP id of virtual-machine
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'virtual-machine', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.VirtualMachine.resource_uri_base['virtual-machine'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end virtual_machine_delete
-
- def get_default_virtual_machine_id(self):
- """Return UUID of default virtual-machine."""
- return self.fq_name_to_id('virtual-machine', vnc_api.gen.resource_client.VirtualMachine().get_fq_name())
- #end get_default_virtual_machine_id
-
- def interface_route_table_create(self, obj):
- """Create new interface-route-table.
-
- :param obj: :class:`.InterfaceRouteTable` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"interface-route-table":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.InterfaceRouteTable.create_uri,
- data = json_body)
-
- interface_route_table_dict = json.loads(content)['interface-route-table']
- obj.uuid = interface_route_table_dict['uuid']
- if 'parent_uuid' in interface_route_table_dict:
- obj.parent_uuid = interface_route_table_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end interface_route_table_create
-
- def interface_route_table_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return interface-route-table information.
-
- :param fq_name: Fully qualified name of interface-route-table
- :param fq_name_str: Fully qualified name string of interface-route-table
- :param id: UUID of interface-route-table
- :param ifmap_id: IFMAP id of interface-route-table
- :returns: :class:`.InterfaceRouteTable` object
-
- """
- (args_ok, result) = self._read_args_to_id('interface-route-table', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.InterfaceRouteTable.resource_uri_base['interface-route-table'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['interface-route-table']
- interface_route_table_obj = vnc_api.gen.resource_client.InterfaceRouteTable.from_dict(**obj_dict)
- interface_route_table_obj.clear_pending_updates()
- interface_route_table_obj.set_server_conn(self)
-
- return interface_route_table_obj
- #end interface_route_table_read
-
- def interface_route_table_update(self, obj):
- """Update interface-route-table.
-
- :param obj: :class:`.InterfaceRouteTable` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('interface-route-table', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"interface-route-table":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.InterfaceRouteTable.resource_uri_base['interface-route-table'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('interface-route-table', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('interface-route-table', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end interface_route_table_update
-
- def interface_route_tables_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all interface-route-tables.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.InterfaceRouteTable` objects
-
- """
- return self.resource_list('interface-route-table', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end interface_route_tables_list
-
- def interface_route_table_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete interface-route-table from the system.
-
- :param fq_name: Fully qualified name of interface-route-table
- :param id: UUID of interface-route-table
- :param ifmap_id: IFMAP id of interface-route-table
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'interface-route-table', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.InterfaceRouteTable.resource_uri_base['interface-route-table'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end interface_route_table_delete
-
- def get_default_interface_route_table_id(self):
- """Return UUID of default interface-route-table."""
- return self.fq_name_to_id('interface-route-table', vnc_api.gen.resource_client.InterfaceRouteTable().get_fq_name())
- #end get_default_interface_route_table_id
-
- def service_template_create(self, obj):
- """Create new service-template.
-
- :param obj: :class:`.ServiceTemplate` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"service-template":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.ServiceTemplate.create_uri,
- data = json_body)
-
- service_template_dict = json.loads(content)['service-template']
- obj.uuid = service_template_dict['uuid']
- if 'parent_uuid' in service_template_dict:
- obj.parent_uuid = service_template_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end service_template_create
-
- def service_template_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return service-template information.
-
- :param fq_name: Fully qualified name of service-template
- :param fq_name_str: Fully qualified name string of service-template
- :param id: UUID of service-template
- :param ifmap_id: IFMAP id of service-template
- :returns: :class:`.ServiceTemplate` object
-
- """
- (args_ok, result) = self._read_args_to_id('service-template', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.ServiceTemplate.resource_uri_base['service-template'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['service-template']
- service_template_obj = vnc_api.gen.resource_client.ServiceTemplate.from_dict(**obj_dict)
- service_template_obj.clear_pending_updates()
- service_template_obj.set_server_conn(self)
-
- return service_template_obj
- #end service_template_read
-
- def service_template_update(self, obj):
- """Update service-template.
-
- :param obj: :class:`.ServiceTemplate` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('service-template', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"service-template":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.ServiceTemplate.resource_uri_base['service-template'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('service-template', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('service-template', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end service_template_update
-
- def service_templates_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all service-templates.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.ServiceTemplate` objects
-
- """
- return self.resource_list('service-template', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end service_templates_list
-
- def service_template_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete service-template from the system.
-
- :param fq_name: Fully qualified name of service-template
- :param id: UUID of service-template
- :param ifmap_id: IFMAP id of service-template
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'service-template', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.ServiceTemplate.resource_uri_base['service-template'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end service_template_delete
-
- def get_default_service_template_id(self):
- """Return UUID of default service-template."""
- return self.fq_name_to_id('service-template', vnc_api.gen.resource_client.ServiceTemplate().get_fq_name())
- #end get_default_service_template_id
-
- def virtual_ip_create(self, obj):
- """Create new virtual-ip.
-
- :param obj: :class:`.VirtualIp` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"virtual-ip":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.VirtualIp.create_uri,
- data = json_body)
-
- virtual_ip_dict = json.loads(content)['virtual-ip']
- obj.uuid = virtual_ip_dict['uuid']
- if 'parent_uuid' in virtual_ip_dict:
- obj.parent_uuid = virtual_ip_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end virtual_ip_create
-
- def virtual_ip_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return virtual-ip information.
-
- :param fq_name: Fully qualified name of virtual-ip
- :param fq_name_str: Fully qualified name string of virtual-ip
- :param id: UUID of virtual-ip
- :param ifmap_id: IFMAP id of virtual-ip
- :returns: :class:`.VirtualIp` object
-
- """
- (args_ok, result) = self._read_args_to_id('virtual-ip', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.VirtualIp.resource_uri_base['virtual-ip'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['virtual-ip']
- virtual_ip_obj = vnc_api.gen.resource_client.VirtualIp.from_dict(**obj_dict)
- virtual_ip_obj.clear_pending_updates()
- virtual_ip_obj.set_server_conn(self)
-
- return virtual_ip_obj
- #end virtual_ip_read
-
- def virtual_ip_update(self, obj):
- """Update virtual-ip.
-
- :param obj: :class:`.VirtualIp` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('virtual-ip', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"virtual-ip":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.VirtualIp.resource_uri_base['virtual-ip'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('virtual-ip', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('virtual-ip', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end virtual_ip_update
-
- def virtual_ips_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all virtual-ips.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.VirtualIp` objects
-
- """
- return self.resource_list('virtual-ip', parent_id = parent_id, parent_fq_name = parent_fq_name, back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end virtual_ips_list
-
- def virtual_ip_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete virtual-ip from the system.
-
- :param fq_name: Fully qualified name of virtual-ip
- :param id: UUID of virtual-ip
- :param ifmap_id: IFMAP id of virtual-ip
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'virtual-ip', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.VirtualIp.resource_uri_base['virtual-ip'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end virtual_ip_delete
-
- def get_default_virtual_ip_id(self):
- """Return UUID of default virtual-ip."""
- return self.fq_name_to_id('virtual-ip', vnc_api.gen.resource_client.VirtualIp().get_fq_name())
- #end get_default_virtual_ip_id
-
- def loadbalancer_member_create(self, obj):
- """Create new loadbalancer-member.
-
- :param obj: :class:`.LoadbalancerMember` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"loadbalancer-member":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.LoadbalancerMember.create_uri,
- data = json_body)
-
- loadbalancer_member_dict = json.loads(content)['loadbalancer-member']
- obj.uuid = loadbalancer_member_dict['uuid']
- if 'parent_uuid' in loadbalancer_member_dict:
- obj.parent_uuid = loadbalancer_member_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end loadbalancer_member_create
-
- def loadbalancer_member_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return loadbalancer-member information.
-
- :param fq_name: Fully qualified name of loadbalancer-member
- :param fq_name_str: Fully qualified name string of loadbalancer-member
- :param id: UUID of loadbalancer-member
- :param ifmap_id: IFMAP id of loadbalancer-member
- :returns: :class:`.LoadbalancerMember` object
-
- """
- (args_ok, result) = self._read_args_to_id('loadbalancer-member', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.LoadbalancerMember.resource_uri_base['loadbalancer-member'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['loadbalancer-member']
- loadbalancer_member_obj = vnc_api.gen.resource_client.LoadbalancerMember.from_dict(**obj_dict)
- loadbalancer_member_obj.clear_pending_updates()
- loadbalancer_member_obj.set_server_conn(self)
-
- return loadbalancer_member_obj
- #end loadbalancer_member_read
-
- def loadbalancer_member_update(self, obj):
- """Update loadbalancer-member.
-
- :param obj: :class:`.LoadbalancerMember` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('loadbalancer-member', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"loadbalancer-member":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.LoadbalancerMember.resource_uri_base['loadbalancer-member'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('loadbalancer-member', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('loadbalancer-member', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end loadbalancer_member_update
-
- def loadbalancer_members_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all loadbalancer-members.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.LoadbalancerMember` objects
-
- """
- return self.resource_list('loadbalancer-member', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end loadbalancer_members_list
-
- def loadbalancer_member_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete loadbalancer-member from the system.
-
- :param fq_name: Fully qualified name of loadbalancer-member
- :param id: UUID of loadbalancer-member
- :param ifmap_id: IFMAP id of loadbalancer-member
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'loadbalancer-member', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.LoadbalancerMember.resource_uri_base['loadbalancer-member'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end loadbalancer_member_delete
-
- def get_default_loadbalancer_member_id(self):
- """Return UUID of default loadbalancer-member."""
- return self.fq_name_to_id('loadbalancer-member', vnc_api.gen.resource_client.LoadbalancerMember().get_fq_name())
- #end get_default_loadbalancer_member_id
-
- def security_group_create(self, obj):
- """Create new security-group.
-
- :param obj: :class:`.SecurityGroup` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"security-group":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.SecurityGroup.create_uri,
- data = json_body)
-
- security_group_dict = json.loads(content)['security-group']
- obj.uuid = security_group_dict['uuid']
- if 'parent_uuid' in security_group_dict:
- obj.parent_uuid = security_group_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end security_group_create
-
- def security_group_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return security-group information.
-
- :param fq_name: Fully qualified name of security-group
- :param fq_name_str: Fully qualified name string of security-group
- :param id: UUID of security-group
- :param ifmap_id: IFMAP id of security-group
- :returns: :class:`.SecurityGroup` object
-
- """
- (args_ok, result) = self._read_args_to_id('security-group', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.SecurityGroup.resource_uri_base['security-group'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['security-group']
- security_group_obj = vnc_api.gen.resource_client.SecurityGroup.from_dict(**obj_dict)
- security_group_obj.clear_pending_updates()
- security_group_obj.set_server_conn(self)
-
- return security_group_obj
- #end security_group_read
-
- def security_group_update(self, obj):
- """Update security-group.
-
- :param obj: :class:`.SecurityGroup` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('security-group', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"security-group":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.SecurityGroup.resource_uri_base['security-group'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('security-group', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('security-group', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end security_group_update
-
- def security_groups_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all security-groups.
-
- :param parent_id: UUID of parent as optional search filter
- :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.SecurityGroup` objects
-
- """
- return self.resource_list('security-group', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end security_groups_list
-
- def security_group_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete security-group from the system.
-
- :param fq_name: Fully qualified name of security-group
- :param id: UUID of security-group
- :param ifmap_id: IFMAP id of security-group
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'security-group', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.SecurityGroup.resource_uri_base['security-group'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end security_group_delete
-
- def get_default_security_group_id(self):
- """Return UUID of default security-group."""
- return self.fq_name_to_id('security-group', vnc_api.gen.resource_client.SecurityGroup().get_fq_name())
- #end get_default_security_group_id
-
- def provider_attachment_create(self, obj):
- """Create new provider-attachment.
-
- :param obj: :class:`.ProviderAttachment` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"provider-attachment":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.ProviderAttachment.create_uri,
- data = json_body)
-
- provider_attachment_dict = json.loads(content)['provider-attachment']
- obj.uuid = provider_attachment_dict['uuid']
- if 'parent_uuid' in provider_attachment_dict:
- obj.parent_uuid = provider_attachment_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end provider_attachment_create
-
- def provider_attachment_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return provider-attachment information.
-
- :param fq_name: Fully qualified name of provider-attachment
- :param fq_name_str: Fully qualified name string of provider-attachment
- :param id: UUID of provider-attachment
- :param ifmap_id: IFMAP id of provider-attachment
- :returns: :class:`.ProviderAttachment` object
-
- """
- (args_ok, result) = self._read_args_to_id('provider-attachment', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.ProviderAttachment.resource_uri_base['provider-attachment'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['provider-attachment']
- provider_attachment_obj = vnc_api.gen.resource_client.ProviderAttachment.from_dict(**obj_dict)
- provider_attachment_obj.clear_pending_updates()
- provider_attachment_obj.set_server_conn(self)
-
- return provider_attachment_obj
- #end provider_attachment_read
-
- def provider_attachment_update(self, obj):
- """Update provider-attachment.
-
- :param obj: :class:`.ProviderAttachment` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('provider-attachment', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"provider-attachment":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.ProviderAttachment.resource_uri_base['provider-attachment'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('provider-attachment', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('provider-attachment', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end provider_attachment_update
-
- def provider_attachments_list(self, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all provider-attachments."""
- return self.resource_list('provider-attachment', back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end provider_attachments_list
-
- def provider_attachment_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete provider-attachment from the system.
-
- :param fq_name: Fully qualified name of provider-attachment
- :param id: UUID of provider-attachment
- :param ifmap_id: IFMAP id of provider-attachment
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'provider-attachment', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.ProviderAttachment.resource_uri_base['provider-attachment'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end provider_attachment_delete
-
- def get_default_provider_attachment_id(self):
- """Return UUID of default provider-attachment."""
- return self.fq_name_to_id('provider-attachment', vnc_api.gen.resource_client.ProviderAttachment().get_fq_name())
- #end get_default_provider_attachment_id
-
- def virtual_machine_interface_create(self, obj):
- """Create new virtual-machine-interface.
-
- :param obj: :class:`.VirtualMachineInterface` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"virtual-machine-interface":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.VirtualMachineInterface.create_uri,
- data = json_body)
-
- virtual_machine_interface_dict = json.loads(content)['virtual-machine-interface']
- obj.uuid = virtual_machine_interface_dict['uuid']
- if 'parent_uuid' in virtual_machine_interface_dict:
- obj.parent_uuid = virtual_machine_interface_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end virtual_machine_interface_create
-
- def virtual_machine_interface_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return virtual-machine-interface information.
-
- :param fq_name: Fully qualified name of virtual-machine-interface
- :param fq_name_str: Fully qualified name string of virtual-machine-interface
- :param id: UUID of virtual-machine-interface
- :param ifmap_id: IFMAP id of virtual-machine-interface
- :returns: :class:`.VirtualMachineInterface` object
-
- """
- (args_ok, result) = self._read_args_to_id('virtual-machine-interface', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.VirtualMachineInterface.resource_uri_base['virtual-machine-interface'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['virtual-machine-interface']
- virtual_machine_interface_obj = vnc_api.gen.resource_client.VirtualMachineInterface.from_dict(**obj_dict)
- virtual_machine_interface_obj.clear_pending_updates()
- virtual_machine_interface_obj.set_server_conn(self)
-
- return virtual_machine_interface_obj
- #end virtual_machine_interface_read
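
# --- Editor's sketch (illustrative only; not part of the deleted file) ---
# The generated *_read methods either send an explicit, comma-joined 'fields'
# query parameter or, by default, ask the server to exclude back-refs and
# children. A minimal sketch, assuming the concrete VncApi client exported by
# this package wraps these generated methods; host, port and UUID below are
# placeholders.
from vnc_api import vnc_api

client = vnc_api.VncApi(api_server_host='127.0.0.1', api_server_port='8082')
vmi_id = '00000000-0000-0000-0000-000000000000'

# Default read: exclude_back_refs/exclude_children query parameters are sent.
vmi = client.virtual_machine_interface_read(id=vmi_id)

# Field-restricted read: only the named properties are requested.
vmi = client.virtual_machine_interface_read(id=vmi_id, fields=['display_name'])
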
-
- def virtual_machine_interface_update(self, obj):
- """Update virtual-machine-interface.
-
- :param obj: :class:`.VirtualMachineInterface` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('virtual-machine-interface', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"virtual-machine-interface":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.VirtualMachineInterface.resource_uri_base['virtual-machine-interface'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('virtual-machine-interface', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('virtual-machine-interface', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end virtual_machine_interface_update
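
# --- Editor's note (illustrative only; not part of the deleted file) ---
# The generated *_update methods diff the original and pending reference lists
# as sets of (uuid, to-tuple, attr) and turn the difference into ref_update
# calls: references that disappeared become 'DELETE', new ones become 'ADD'.
# A self-contained sketch of that diffing, using made-up reference dicts:

def diff_refs(orig_refs, new_refs):
    """Return (to_delete, to_add) the same way the *_update methods derive them."""
    def as_set(refs):
        return set((r.get('uuid'), tuple(r.get('to', [])), r.get('attr'))
                   for r in refs)
    orig, new = as_set(orig_refs), as_set(new_refs)
    return orig - new, new - orig

old_refs = [{'uuid': 'u1', 'to': ['default-domain', 'demo', 'net-a'], 'attr': None}]
new_refs = [{'uuid': 'u2', 'to': ['default-domain', 'demo', 'net-b'], 'attr': None}]
to_delete, to_add = diff_refs(old_refs, new_refs)
# to_delete -> {('u1', ('default-domain', 'demo', 'net-a'), None)}  => ref_update(..., 'DELETE')
# to_add    -> {('u2', ('default-domain', 'demo', 'net-b'), None)}  => ref_update(..., 'ADD', attr)
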
-
- def virtual_machine_interfaces_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all virtual-machine-interfaces.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.VirtualMachineInterface` objects
-
- """
- return self.resource_list('virtual-machine-interface', parent_id = parent_id, parent_fq_name = parent_fq_name, back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end virtual_machine_interfaces_list
-
- def virtual_machine_interface_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete virtual-machine-interface from the system.
-
- :param fq_name: Fully qualified name of virtual-machine-interface
- :param id: UUID of virtual-machine-interface
- :param ifmap_id: IFMAP id of virtual-machine-interface
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'virtual-machine-interface', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.VirtualMachineInterface.resource_uri_base['virtual-machine-interface'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end virtual_machine_interface_delete
-
- def get_default_virtual_machine_interface_id(self):
- """Return UUID of default virtual-machine-interface."""
- return self.fq_name_to_id('virtual-machine-interface', vnc_api.gen.resource_client.VirtualMachineInterface().get_fq_name())
-    #end get_default_virtual_machine_interface_id
-
- def loadbalancer_healthmonitor_create(self, obj):
- """Create new loadbalancer-healthmonitor.
-
- :param obj: :class:`.LoadbalancerHealthmonitor` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"loadbalancer-healthmonitor":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.LoadbalancerHealthmonitor.create_uri,
- data = json_body)
-
- loadbalancer_healthmonitor_dict = json.loads(content)['loadbalancer-healthmonitor']
- obj.uuid = loadbalancer_healthmonitor_dict['uuid']
- if 'parent_uuid' in loadbalancer_healthmonitor_dict:
- obj.parent_uuid = loadbalancer_healthmonitor_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end loadbalancer_healthmonitor_create
-
- def loadbalancer_healthmonitor_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return loadbalancer-healthmonitor information.
-
- :param fq_name: Fully qualified name of loadbalancer-healthmonitor
- :param fq_name_str: Fully qualified name string of loadbalancer-healthmonitor
- :param id: UUID of loadbalancer-healthmonitor
- :param ifmap_id: IFMAP id of loadbalancer-healthmonitor
- :returns: :class:`.LoadbalancerHealthmonitor` object
-
- """
- (args_ok, result) = self._read_args_to_id('loadbalancer-healthmonitor', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.LoadbalancerHealthmonitor.resource_uri_base['loadbalancer-healthmonitor'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['loadbalancer-healthmonitor']
- loadbalancer_healthmonitor_obj = vnc_api.gen.resource_client.LoadbalancerHealthmonitor.from_dict(**obj_dict)
- loadbalancer_healthmonitor_obj.clear_pending_updates()
- loadbalancer_healthmonitor_obj.set_server_conn(self)
-
- return loadbalancer_healthmonitor_obj
- #end loadbalancer_healthmonitor_read
-
- def loadbalancer_healthmonitor_update(self, obj):
- """Update loadbalancer-healthmonitor.
-
- :param obj: :class:`.LoadbalancerHealthmonitor` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('loadbalancer-healthmonitor', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"loadbalancer-healthmonitor":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.LoadbalancerHealthmonitor.resource_uri_base['loadbalancer-healthmonitor'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('loadbalancer-healthmonitor', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('loadbalancer-healthmonitor', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end loadbalancer_healthmonitor_update
-
- def loadbalancer_healthmonitors_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all loadbalancer-healthmonitors.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.LoadbalancerHealthmonitor` objects
-
- """
- return self.resource_list('loadbalancer-healthmonitor', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end loadbalancer_healthmonitors_list
-
- def loadbalancer_healthmonitor_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete loadbalancer-healthmonitor from the system.
-
- :param fq_name: Fully qualified name of loadbalancer-healthmonitor
- :param id: UUID of loadbalancer-healthmonitor
- :param ifmap_id: IFMAP id of loadbalancer-healthmonitor
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'loadbalancer-healthmonitor', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.LoadbalancerHealthmonitor.resource_uri_base['loadbalancer-healthmonitor'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end loadbalancer_healthmonitor_delete
-
- def get_default_loadbalancer_healthmonitor_id(self):
- """Return UUID of default loadbalancer-healthmonitor."""
- return self.fq_name_to_id('loadbalancer-healthmonitor', vnc_api.gen.resource_client.LoadbalancerHealthmonitor().get_fq_name())
-    #end get_default_loadbalancer_healthmonitor_id
-
- def virtual_network_create(self, obj):
- """Create new virtual-network.
-
- :param obj: :class:`.VirtualNetwork` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"virtual-network":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.VirtualNetwork.create_uri,
- data = json_body)
-
- virtual_network_dict = json.loads(content)['virtual-network']
- obj.uuid = virtual_network_dict['uuid']
- if 'parent_uuid' in virtual_network_dict:
- obj.parent_uuid = virtual_network_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end virtual_network_create
-
- def virtual_network_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return virtual-network information.
-
- :param fq_name: Fully qualified name of virtual-network
- :param fq_name_str: Fully qualified name string of virtual-network
- :param id: UUID of virtual-network
- :param ifmap_id: IFMAP id of virtual-network
- :returns: :class:`.VirtualNetwork` object
-
- """
- (args_ok, result) = self._read_args_to_id('virtual-network', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.VirtualNetwork.resource_uri_base['virtual-network'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['virtual-network']
- virtual_network_obj = vnc_api.gen.resource_client.VirtualNetwork.from_dict(**obj_dict)
- virtual_network_obj.clear_pending_updates()
- virtual_network_obj.set_server_conn(self)
-
- return virtual_network_obj
- #end virtual_network_read
-
- def virtual_network_update(self, obj):
- """Update virtual-network.
-
- :param obj: :class:`.VirtualNetwork` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('virtual-network', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"virtual-network":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.VirtualNetwork.resource_uri_base['virtual-network'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('virtual-network', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('virtual-network', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end virtual_network_update
-
- def virtual_networks_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all virtual-networks.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.VirtualNetwork` objects
-
- """
- return self.resource_list('virtual-network', parent_id = parent_id, parent_fq_name = parent_fq_name, back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end virtual_networks_list
-
- def virtual_network_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete virtual-network from the system.
-
- :param fq_name: Fully qualified name of virtual-network
- :param id: UUID of virtual-network
- :param ifmap_id: IFMAP id of virtual-network
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'virtual-network', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.VirtualNetwork.resource_uri_base['virtual-network'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end virtual_network_delete
-
- def get_default_virtual_network_id(self):
- """Return UUID of default virtual-network."""
- return self.fq_name_to_id('virtual-network', vnc_api.gen.resource_client.VirtualNetwork().get_fq_name())
-    #end get_default_virtual_network_id
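
# --- Editor's usage sketch (illustrative only; not part of the deleted file) ---
# The block above is the complete generated CRUD surface for virtual-network:
# create POSTs the serialized object, read GETs it by UUID, update PUTs it and
# then replays pending reference edits, list delegates to resource_list, and
# delete issues the DELETE. A minimal end-to-end sketch, assuming the concrete
# VncApi client and the VirtualNetwork class exported by this package, and an
# API server at the placeholder address below.
from vnc_api import vnc_api

client = vnc_api.VncApi(api_server_host='127.0.0.1', api_server_port='8082')

vn = vnc_api.VirtualNetwork(name='demo-net')   # parented under the default project
vn_uuid = client.virtual_network_create(vn)    # returns the server-assigned UUID

vn = client.virtual_network_read(id=vn_uuid)   # round-trip the stored object
vn.set_display_name('demo network')            # assumes the generated setter
client.virtual_network_update(vn)              # PUT plus any pending ref updates

# The plain list call typically returns a dict keyed by the plural resource name.
for entry in client.virtual_networks_list()['virtual-networks']:
    print(entry['fq_name'], entry['uuid'])

client.virtual_network_delete(id=vn_uuid)
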
-
- def project_create(self, obj):
- """Create new project.
-
- :param obj: :class:`.Project` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"project":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.Project.create_uri,
- data = json_body)
-
- project_dict = json.loads(content)['project']
- obj.uuid = project_dict['uuid']
- if 'parent_uuid' in project_dict:
- obj.parent_uuid = project_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end project_create
-
- def project_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return project information.
-
- :param fq_name: Fully qualified name of project
- :param fq_name_str: Fully qualified name string of project
- :param id: UUID of project
- :param ifmap_id: IFMAP id of project
- :returns: :class:`.Project` object
-
- """
- (args_ok, result) = self._read_args_to_id('project', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.Project.resource_uri_base['project'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['project']
- project_obj = vnc_api.gen.resource_client.Project.from_dict(**obj_dict)
- project_obj.clear_pending_updates()
- project_obj.set_server_conn(self)
-
- return project_obj
- #end project_read
-
- def project_update(self, obj):
- """Update project.
-
- :param obj: :class:`.Project` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('project', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"project":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.Project.resource_uri_base['project'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('project', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('project', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end project_update
-
- def projects_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all projects.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.Project` objects
-
- """
- return self.resource_list('project', parent_id = parent_id, parent_fq_name = parent_fq_name, back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end projects_list
-
- def project_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete project from the system.
-
- :param fq_name: Fully qualified name of project
- :param id: UUID of project
- :param ifmap_id: IFMAP id of project
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'project', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.Project.resource_uri_base['project'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end project_delete
-
- def get_default_project_id(self):
- """Return UUID of default project."""
- return self.fq_name_to_id('project', vnc_api.gen.resource_client.Project().get_fq_name())
-    #end get_default_project_id
-
- def qos_forwarding_class_create(self, obj):
- """Create new qos-forwarding-class.
-
- :param obj: :class:`.QosForwardingClass` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"qos-forwarding-class":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.QosForwardingClass.create_uri,
- data = json_body)
-
- qos_forwarding_class_dict = json.loads(content)['qos-forwarding-class']
- obj.uuid = qos_forwarding_class_dict['uuid']
- if 'parent_uuid' in qos_forwarding_class_dict:
- obj.parent_uuid = qos_forwarding_class_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end qos_forwarding_class_create
-
- def qos_forwarding_class_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return qos-forwarding-class information.
-
- :param fq_name: Fully qualified name of qos-forwarding-class
- :param fq_name_str: Fully qualified name string of qos-forwarding-class
- :param id: UUID of qos-forwarding-class
- :param ifmap_id: IFMAP id of qos-forwarding-class
- :returns: :class:`.QosForwardingClass` object
-
- """
- (args_ok, result) = self._read_args_to_id('qos-forwarding-class', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.QosForwardingClass.resource_uri_base['qos-forwarding-class'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['qos-forwarding-class']
- qos_forwarding_class_obj = vnc_api.gen.resource_client.QosForwardingClass.from_dict(**obj_dict)
- qos_forwarding_class_obj.clear_pending_updates()
- qos_forwarding_class_obj.set_server_conn(self)
-
- return qos_forwarding_class_obj
- #end qos_forwarding_class_read
-
- def qos_forwarding_class_update(self, obj):
- """Update qos-forwarding-class.
-
- :param obj: :class:`.QosForwardingClass` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('qos-forwarding-class', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"qos-forwarding-class":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.QosForwardingClass.resource_uri_base['qos-forwarding-class'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('qos-forwarding-class', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('qos-forwarding-class', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end qos_forwarding_class_update
-
- def qos_forwarding_classs_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
-        """List all qos-forwarding-classes.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.QosForwardingClass` objects
-
- """
- return self.resource_list('qos-forwarding-class', parent_id = parent_id, parent_fq_name = parent_fq_name, back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end qos_forwarding_classs_list
-
- def qos_forwarding_class_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete qos-forwarding-class from the system.
-
- :param fq_name: Fully qualified name of qos-forwarding-class
- :param id: UUID of qos-forwarding-class
- :param ifmap_id: IFMAP id of qos-forwarding-class
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'qos-forwarding-class', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.QosForwardingClass.resource_uri_base['qos-forwarding-class'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end qos_forwarding_class_delete
-
- def get_default_qos_forwarding_class_id(self):
- """Return UUID of default qos-forwarding-class."""
- return self.fq_name_to_id('qos-forwarding-class', vnc_api.gen.resource_client.QosForwardingClass().get_fq_name())
-    #end get_default_qos_forwarding_class_id
-
- def database_node_create(self, obj):
- """Create new database-node.
-
- :param obj: :class:`.DatabaseNode` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"database-node":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.DatabaseNode.create_uri,
- data = json_body)
-
- database_node_dict = json.loads(content)['database-node']
- obj.uuid = database_node_dict['uuid']
- if 'parent_uuid' in database_node_dict:
- obj.parent_uuid = database_node_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end database_node_create
-
- def database_node_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return database-node information.
-
- :param fq_name: Fully qualified name of database-node
- :param fq_name_str: Fully qualified name string of database-node
- :param id: UUID of database-node
- :param ifmap_id: IFMAP id of database-node
- :returns: :class:`.DatabaseNode` object
-
- """
- (args_ok, result) = self._read_args_to_id('database-node', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.DatabaseNode.resource_uri_base['database-node'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['database-node']
- database_node_obj = vnc_api.gen.resource_client.DatabaseNode.from_dict(**obj_dict)
- database_node_obj.clear_pending_updates()
- database_node_obj.set_server_conn(self)
-
- return database_node_obj
- #end database_node_read
-
- def database_node_update(self, obj):
- """Update database-node.
-
- :param obj: :class:`.DatabaseNode` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('database-node', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"database-node":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.DatabaseNode.resource_uri_base['database-node'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('database-node', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('database-node', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end database_node_update
-
- def database_nodes_list(self, parent_id = None, parent_fq_name = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all database-nodes.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.DatabaseNode` objects
-
- """
- return self.resource_list('database-node', parent_id = parent_id, parent_fq_name = parent_fq_name, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end database_nodes_list
-
- def database_node_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete database-node from the system.
-
- :param fq_name: Fully qualified name of database-node
- :param id: UUID of database-node
- :param ifmap_id: IFMAP id of database-node
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'database-node', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.DatabaseNode.resource_uri_base['database-node'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end database_node_delete
-
- def get_default_database_node_id(self):
- """Return UUID of default database-node."""
- return self.fq_name_to_id('database-node', vnc_api.gen.resource_client.DatabaseNode().get_fq_name())
-    #end get_default_database_node_id
-
- def routing_instance_create(self, obj):
- """Create new routing-instance.
-
- :param obj: :class:`.RoutingInstance` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"routing-instance":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.RoutingInstance.create_uri,
- data = json_body)
-
- routing_instance_dict = json.loads(content)['routing-instance']
- obj.uuid = routing_instance_dict['uuid']
- if 'parent_uuid' in routing_instance_dict:
- obj.parent_uuid = routing_instance_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end routing_instance_create
-
- def routing_instance_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return routing-instance information.
-
- :param fq_name: Fully qualified name of routing-instance
- :param fq_name_str: Fully qualified name string of routing-instance
- :param id: UUID of routing-instance
- :param ifmap_id: IFMAP id of routing-instance
- :returns: :class:`.RoutingInstance` object
-
- """
- (args_ok, result) = self._read_args_to_id('routing-instance', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.RoutingInstance.resource_uri_base['routing-instance'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['routing-instance']
- routing_instance_obj = vnc_api.gen.resource_client.RoutingInstance.from_dict(**obj_dict)
- routing_instance_obj.clear_pending_updates()
- routing_instance_obj.set_server_conn(self)
-
- return routing_instance_obj
- #end routing_instance_read
-
- def routing_instance_update(self, obj):
- """Update routing-instance.
-
- :param obj: :class:`.RoutingInstance` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('routing-instance', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"routing-instance":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.RoutingInstance.resource_uri_base['routing-instance'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('routing-instance', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('routing-instance', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end routing_instance_update
-
- def routing_instances_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all routing-instances.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.RoutingInstance` objects
-
- """
- return self.resource_list('routing-instance', parent_id = parent_id, parent_fq_name = parent_fq_name, back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end routing_instances_list
-
- def routing_instance_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete routing-instance from the system.
-
- :param fq_name: Fully qualified name of routing-instance
- :param id: UUID of routing-instance
- :param ifmap_id: IFMAP id of routing-instance
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'routing-instance', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.RoutingInstance.resource_uri_base['routing-instance'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end routing_instance_delete
-
- def get_default_routing_instance_id(self):
- """Return UUID of default routing-instance."""
- return self.fq_name_to_id('routing-instance', vnc_api.gen.resource_client.RoutingInstance().get_fq_name())
-    #end get_default_routing_instance_id
-
- def network_ipam_create(self, obj):
- """Create new network-ipam.
-
- :param obj: :class:`.NetworkIpam` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"network-ipam":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.NetworkIpam.create_uri,
- data = json_body)
-
- network_ipam_dict = json.loads(content)['network-ipam']
- obj.uuid = network_ipam_dict['uuid']
- if 'parent_uuid' in network_ipam_dict:
- obj.parent_uuid = network_ipam_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end network_ipam_create
-
- def network_ipam_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return network-ipam information.
-
- :param fq_name: Fully qualified name of network-ipam
- :param fq_name_str: Fully qualified name string of network-ipam
- :param id: UUID of network-ipam
- :param ifmap_id: IFMAP id of network-ipam
- :returns: :class:`.NetworkIpam` object
-
- """
- (args_ok, result) = self._read_args_to_id('network-ipam', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.NetworkIpam.resource_uri_base['network-ipam'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['network-ipam']
- network_ipam_obj = vnc_api.gen.resource_client.NetworkIpam.from_dict(**obj_dict)
- network_ipam_obj.clear_pending_updates()
- network_ipam_obj.set_server_conn(self)
-
- return network_ipam_obj
- #end network_ipam_read
-
- def network_ipam_update(self, obj):
- """Update network-ipam.
-
- :param obj: :class:`.NetworkIpam` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('network-ipam', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"network-ipam":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.NetworkIpam.resource_uri_base['network-ipam'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('network-ipam', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('network-ipam', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end network_ipam_update
-
- def network_ipams_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all network-ipams.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.NetworkIpam` objects
-
- """
- return self.resource_list('network-ipam', parent_id = parent_id, parent_fq_name = parent_fq_name, back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end network_ipams_list
-
- def network_ipam_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete network-ipam from the system.
-
- :param fq_name: Fully qualified name of network-ipam
- :param id: UUID of network-ipam
- :param ifmap_id: IFMAP id of network-ipam
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'network-ipam', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.NetworkIpam.resource_uri_base['network-ipam'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end network_ipam_delete
-
- def get_default_network_ipam_id(self):
- """Return UUID of default network-ipam."""
- return self.fq_name_to_id('network-ipam', vnc_api.gen.resource_client.NetworkIpam().get_fq_name())
-    #end get_default_network_ipam_id
-
- def logical_router_create(self, obj):
- """Create new logical-router.
-
- :param obj: :class:`.LogicalRouter` object
-
- """
- obj._pending_field_updates |= obj._pending_ref_updates
- obj._pending_ref_updates = set([])
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"logical-router":' + json_param + '}'
- content = self._request_server(rest.OP_POST,
- vnc_api.gen.resource_client.LogicalRouter.create_uri,
- data = json_body)
-
- logical_router_dict = json.loads(content)['logical-router']
- obj.uuid = logical_router_dict['uuid']
- if 'parent_uuid' in logical_router_dict:
- obj.parent_uuid = logical_router_dict['parent_uuid']
-
- obj.set_server_conn(self)
-
- return obj.uuid
- #end logical_router_create
-
- def logical_router_read(self, fq_name = None, fq_name_str = None, id = None, ifmap_id = None, fields = None):
- """Return logical-router information.
-
- :param fq_name: Fully qualified name of logical-router
- :param fq_name_str: Fully qualified name string of logical-router
- :param id: UUID of logical-router
- :param ifmap_id: IFMAP id of logical-router
- :returns: :class:`.LogicalRouter` object
-
- """
- (args_ok, result) = self._read_args_to_id('logical-router', fq_name, fq_name_str, id, ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.LogicalRouter.resource_uri_base['logical-router'] + '/' + id
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params = {'fields': comma_sep_fields}
- else:
- query_params = {'exclude_back_refs':True,
- 'exclude_children':True,}
- content = self._request_server(rest.OP_GET, uri, query_params)
-
- obj_dict = json.loads(content)['logical-router']
- logical_router_obj = vnc_api.gen.resource_client.LogicalRouter.from_dict(**obj_dict)
- logical_router_obj.clear_pending_updates()
- logical_router_obj.set_server_conn(self)
-
- return logical_router_obj
- #end logical_router_read
-
- def logical_router_update(self, obj):
- """Update logical-router.
-
- :param obj: :class:`.LogicalRouter` object
-
- """
- # Read in uuid from api-server if not specified in obj
- if not obj.uuid:
- obj.uuid = self.fq_name_to_id('logical-router', obj.get_fq_name())
-
- # Ignore fields with None value in json representation
- json_param = json.dumps(obj, default = self._obj_serializer)
- json_body = '{"logical-router":' + json_param + '}'
-
- id = obj.uuid
- uri = vnc_api.gen.resource_client.LogicalRouter.resource_uri_base['logical-router'] + '/' + id
- content = self._request_server(rest.OP_PUT, uri, data = json_body)
- for ref_name in obj._pending_ref_updates:
- ref_orig = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, '_original_' + ref_name, [])])
- ref_new = set([(x.get('uuid'), tuple(x.get('to', [])), x.get('attr')) for x in getattr(obj, ref_name, [])])
- for ref in ref_orig - ref_new:
- self.ref_update('logical-router', obj.uuid, ref_name, ref[0], list(ref[1]), 'DELETE')
- for ref in ref_new - ref_orig:
- self.ref_update('logical-router', obj.uuid, ref_name, ref[0], list(ref[1]), 'ADD', ref[2])
- obj.clear_pending_updates()
-
- return content
- #end logical_router_update
-
- def logical_routers_list(self, parent_id = None, parent_fq_name = None, back_ref_id = None, obj_uuids = None, fields = None, detail = False, count = False, filters = None):
- """List all logical-routers.
-
- :param parent_id: UUID of parent as optional search filter
-        :param parent_fq_name: fully qualified name of parent as optional search filter
- :returns: list of :class:`.LogicalRouter` objects
-
- """
- return self.resource_list('logical-router', parent_id = parent_id, parent_fq_name = parent_fq_name, back_ref_id = back_ref_id, obj_uuids=obj_uuids, fields=fields, detail=detail, count=count, filters=filters)
- #end logical_routers_list
-
- def logical_router_delete(self, fq_name = None, id = None, ifmap_id = None):
- """Delete logical-router from the system.
-
- :param fq_name: Fully qualified name of logical-router
- :param id: UUID of logical-router
- :param ifmap_id: IFMAP id of logical-router
-
- """
- (args_ok, result) = self._read_args_to_id(obj_type = 'logical-router', fq_name = fq_name, id = id, ifmap_id = ifmap_id)
- if not args_ok:
- return result
-
- id = result
- uri = vnc_api.gen.resource_client.LogicalRouter.resource_uri_base['logical-router'] + '/' + id
-
- content = self._request_server(rest.OP_DELETE, uri)
- #end logical_router_delete
-
- def get_default_logical_router_id(self):
- """Return UUID of default logical-router."""
- return self.fq_name_to_id('logical-router', vnc_api.gen.resource_client.LogicalRouter().get_fq_name())
-    #end get_default_logical_router_id
-
-#end class VncApiClientGen
-
- prop_name_to_xsd_type = {
-
- }
-
-ConnectionDriverBase.register(VncApiClientGen)
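
# --- Editor's note (illustrative only; not part of the deleted file) ---
# The register() call above most likely uses abc-style virtual-subclass
# registration: ConnectionDriverBase records VncApiClientGen so that
# isinstance/issubclass checks against the driver interface accept the
# generated client without inheritance. A self-contained sketch of that
# pattern with stand-in names:
import abc

class DriverBase(abc.ABC):          # stand-in for ConnectionDriverBase
    pass

class GeneratedClient(object):      # stand-in for VncApiClientGen
    pass

DriverBase.register(GeneratedClient)            # virtual-subclass registration
assert issubclass(GeneratedClient, DriverBase)  # satisfied without inheritance
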
diff --git a/Testcases/vnc_api/gen/vnc_api_client_gen.pyc b/Testcases/vnc_api/gen/vnc_api_client_gen.pyc
deleted file mode 100644
index 088d86b..0000000
--- a/Testcases/vnc_api/gen/vnc_api_client_gen.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/gen/vnc_api_extension_gen.py b/Testcases/vnc_api/gen/vnc_api_extension_gen.py
deleted file mode 100644
index 63de5e7..0000000
--- a/Testcases/vnc_api/gen/vnc_api_extension_gen.py
+++ /dev/null
@@ -1,2469 +0,0 @@
-
-# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit!
-
-class ResourceApiGen(object):
- def pre_domain_create(self, resource_dict):
- """
- Method called before domain is created
- """
- pass
- #end pre_domain_create
-
- def post_domain_create(self, resource_dict):
- """
- Method called after domain is created
- """
- pass
- #end post_domain_create
-
- def pre_domain_read(self, resource_id):
- """
- Method called before domain is read
- """
- pass
- #end pre_domain_read
-
- def post_domain_read(self, resource_id, resource_dict):
- """
- Method called after domain is read
- """
- pass
- #end post_domain_read
-
- def pre_domain_update(self, resource_id, resource_dict):
- """
- Method called before domain is updated
- """
- pass
- #end pre_domain_update
-
- def post_domain_update(self, resource_id, resource_dict):
- """
- Method called after domain is updated
- """
- pass
- #end post_domain_update
-
- def pre_domain_delete(self, resource_id):
- """
- Method called before domain is deleted
- """
- pass
- #end pre_domain_delete
-
- def post_domain_delete(self, resource_id, resource_dict):
- """
- Method called after domain is deleted
- """
- pass
- #end post_domain_delete
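
# --- Editor's note (illustrative only; not part of the deleted file) ---
# ResourceApiGen is a stub base class: every resource type gets pre_*/post_*
# hooks around create/read/update/delete that default to pass, and an API
# server extension overrides only the hooks it needs. A minimal sketch under
# that assumption; the subclass name and the ValueError are placeholders:

class DomainAuditApiGen(ResourceApiGen):
    def pre_domain_create(self, resource_dict):
        # Invoked with the request body before the domain is written.
        if not resource_dict.get('fq_name'):
            raise ValueError('domain create requires an fq_name')

    def post_domain_delete(self, resource_id, resource_dict):
        # Invoked after the delete completes with the removed body.
        print('domain %s removed' % resource_id)
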
-
- def pre_global_vrouter_config_create(self, resource_dict):
- """
- Method called before global-vrouter-config is created
- """
- pass
- #end pre_global_vrouter_config_create
-
- def post_global_vrouter_config_create(self, resource_dict):
- """
- Method called after global-vrouter-config is created
- """
- pass
- #end post_global_vrouter_config_create
-
- def pre_global_vrouter_config_read(self, resource_id):
- """
- Method called before global-vrouter-config is read
- """
- pass
- #end pre_global_vrouter_config_read
-
- def post_global_vrouter_config_read(self, resource_id, resource_dict):
- """
- Method called after global-vrouter-config is read
- """
- pass
- #end post_global_vrouter_config_read
-
- def pre_global_vrouter_config_update(self, resource_id, resource_dict):
- """
- Method called before global-vrouter-config is updated
- """
- pass
- #end pre_global_vrouter_config_update
-
- def post_global_vrouter_config_update(self, resource_id, resource_dict):
- """
- Method called after global-vrouter-config is updated
- """
- pass
- #end post_global_vrouter_config_update
-
- def pre_global_vrouter_config_delete(self, resource_id):
- """
- Method called before global-vrouter-config is deleted
- """
- pass
- #end pre_global_vrouter_config_delete
-
- def post_global_vrouter_config_delete(self, resource_id, resource_dict):
- """
- Method called after global-vrouter-config is deleted
- """
- pass
- #end post_global_vrouter_config_delete
-
- def pre_instance_ip_create(self, resource_dict):
- """
- Method called before instance-ip is created
- """
- pass
- #end pre_instance_ip_create
-
- def post_instance_ip_create(self, resource_dict):
- """
- Method called after instance-ip is created
- """
- pass
- #end post_instance_ip_create
-
- def pre_instance_ip_read(self, resource_id):
- """
- Method called before instance-ip is read
- """
- pass
- #end pre_instance_ip_read
-
- def post_instance_ip_read(self, resource_id, resource_dict):
- """
- Method called after instance-ip is read
- """
- pass
- #end post_instance_ip_read
-
- def pre_instance_ip_update(self, resource_id, resource_dict):
- """
- Method called before instance-ip is updated
- """
- pass
- #end pre_instance_ip_update
-
- def post_instance_ip_update(self, resource_id, resource_dict):
- """
- Method called after instance-ip is updated
- """
- pass
- #end post_instance_ip_update
-
- def pre_instance_ip_delete(self, resource_id):
- """
- Method called before instance-ip is deleted
- """
- pass
- #end pre_instance_ip_delete
-
- def post_instance_ip_delete(self, resource_id, resource_dict):
- """
- Method called after instance-ip is deleted
- """
- pass
- #end post_instance_ip_delete
-
- def pre_network_policy_create(self, resource_dict):
- """
- Method called before network-policy is created
- """
- pass
- #end pre_network_policy_create
-
- def post_network_policy_create(self, resource_dict):
- """
- Method called after network-policy is created
- """
- pass
- #end post_network_policy_create
-
- def pre_network_policy_read(self, resource_id):
- """
- Method called before network-policy is read
- """
- pass
- #end pre_network_policy_read
-
- def post_network_policy_read(self, resource_id, resource_dict):
- """
- Method called after network-policy is read
- """
- pass
- #end post_network_policy_read
-
- def pre_network_policy_update(self, resource_id, resource_dict):
- """
- Method called before network-policy is updated
- """
- pass
- #end pre_network_policy_update
-
- def post_network_policy_update(self, resource_id, resource_dict):
- """
- Method called after network-policy is updated
- """
- pass
- #end post_network_policy_update
-
- def pre_network_policy_delete(self, resource_id):
- """
- Method called before network-policy is deleted
- """
- pass
- #end pre_network_policy_delete
-
- def post_network_policy_delete(self, resource_id, resource_dict):
- """
- Method called after network-policy is deleted
- """
- pass
- #end post_network_policy_delete
-
- def pre_loadbalancer_pool_create(self, resource_dict):
- """
- Method called before loadbalancer-pool is created
- """
- pass
- #end pre_loadbalancer_pool_create
-
- def post_loadbalancer_pool_create(self, resource_dict):
- """
- Method called after loadbalancer-pool is created
- """
- pass
- #end post_loadbalancer_pool_create
-
- def pre_loadbalancer_pool_read(self, resource_id):
- """
- Method called before loadbalancer-pool is read
- """
- pass
- #end pre_loadbalancer_pool_read
-
- def post_loadbalancer_pool_read(self, resource_id, resource_dict):
- """
- Method called after loadbalancer-pool is read
- """
- pass
- #end post_loadbalancer_pool_read
-
- def pre_loadbalancer_pool_update(self, resource_id, resource_dict):
- """
- Method called before loadbalancer-pool is updated
- """
- pass
- #end pre_loadbalancer_pool_update
-
- def post_loadbalancer_pool_update(self, resource_id, resource_dict):
- """
- Method called after loadbalancer-pool is updated
- """
- pass
- #end post_loadbalancer_pool_update
-
- def pre_loadbalancer_pool_delete(self, resource_id):
- """
- Method called before loadbalancer-pool is deleted
- """
- pass
- #end pre_loadbalancer_pool_delete
-
- def post_loadbalancer_pool_delete(self, resource_id, resource_dict):
- """
- Method called after loadbalancer-pool is deleted
- """
- pass
- #end post_loadbalancer_pool_delete
-
- def pre_virtual_DNS_record_create(self, resource_dict):
- """
- Method called before virtual-DNS-record is created
- """
- pass
- #end pre_virtual_DNS_record_create
-
- def post_virtual_DNS_record_create(self, resource_dict):
- """
- Method called after virtual-DNS-record is created
- """
- pass
- #end post_virtual_DNS_record_create
-
- def pre_virtual_DNS_record_read(self, resource_id):
- """
- Method called before virtual-DNS-record is read
- """
- pass
- #end pre_virtual_DNS_record_read
-
- def post_virtual_DNS_record_read(self, resource_id, resource_dict):
- """
- Method called after virtual-DNS-record is read
- """
- pass
- #end post_virtual_DNS_record_read
-
- def pre_virtual_DNS_record_update(self, resource_id, resource_dict):
- """
- Method called before virtual-DNS-record is updated
- """
- pass
- #end pre_virtual_DNS_record_update
-
- def post_virtual_DNS_record_update(self, resource_id, resource_dict):
- """
- Method called after virtual-DNS-record is updated
- """
- pass
- #end post_virtual_DNS_record_update
-
- def pre_virtual_DNS_record_delete(self, resource_id):
- """
- Method called before virtual-DNS-record is deleted
- """
- pass
- #end pre_virtual_DNS_record_delete
-
- def post_virtual_DNS_record_delete(self, resource_id, resource_dict):
- """
- Method called after virtual-DNS-record is deleted
- """
- pass
- #end post_virtual_DNS_record_delete
-
- def pre_route_target_create(self, resource_dict):
- """
- Method called before route-target is created
- """
- pass
- #end pre_route_target_create
-
- def post_route_target_create(self, resource_dict):
- """
- Method called after route-target is created
- """
- pass
- #end post_route_target_create
-
- def pre_route_target_read(self, resource_id):
- """
- Method called before route-target is read
- """
- pass
- #end pre_route_target_read
-
- def post_route_target_read(self, resource_id, resource_dict):
- """
- Method called after route-target is read
- """
- pass
- #end post_route_target_read
-
- def pre_route_target_update(self, resource_id, resource_dict):
- """
- Method called before route-target is updated
- """
- pass
- #end pre_route_target_update
-
- def post_route_target_update(self, resource_id, resource_dict):
- """
- Method called after route-target is updated
- """
- pass
- #end post_route_target_update
-
- def pre_route_target_delete(self, resource_id):
- """
- Method called before route-target is deleted
- """
- pass
- #end pre_route_target_delete
-
- def post_route_target_delete(self, resource_id, resource_dict):
- """
- Method called after route-target is deleted
- """
- pass
- #end post_route_target_delete
-
- def pre_floating_ip_create(self, resource_dict):
- """
- Method called before floating-ip is created
- """
- pass
- #end pre_floating_ip_create
-
- def post_floating_ip_create(self, resource_dict):
- """
- Method called after floating-ip is created
- """
- pass
- #end post_floating_ip_create
-
- def pre_floating_ip_read(self, resource_id):
- """
- Method called before floating-ip is read
- """
- pass
- #end pre_floating_ip_read
-
- def post_floating_ip_read(self, resource_id, resource_dict):
- """
- Method called after floating-ip is read
- """
- pass
- #end post_floating_ip_read
-
- def pre_floating_ip_update(self, resource_id, resource_dict):
- """
- Method called before floating-ip is updated
- """
- pass
- #end pre_floating_ip_update
-
- def post_floating_ip_update(self, resource_id, resource_dict):
- """
- Method called after floating-ip is updated
- """
- pass
- #end post_floating_ip_update
-
- def pre_floating_ip_delete(self, resource_id):
- """
- Method called before floating-ip is deleted
- """
- pass
- #end pre_floating_ip_delete
-
- def post_floating_ip_delete(self, resource_id, resource_dict):
- """
- Method called after floating-ip is deleted
- """
- pass
- #end post_floating_ip_delete
-
- def pre_floating_ip_pool_create(self, resource_dict):
- """
- Method called before floating-ip-pool is created
- """
- pass
- #end pre_floating_ip_pool_create
-
- def post_floating_ip_pool_create(self, resource_dict):
- """
- Method called after floating-ip-pool is created
- """
- pass
- #end post_floating_ip_pool_create
-
- def pre_floating_ip_pool_read(self, resource_id):
- """
- Method called before floating-ip-pool is read
- """
- pass
- #end pre_floating_ip_pool_read
-
- def post_floating_ip_pool_read(self, resource_id, resource_dict):
- """
- Method called after floating-ip-pool is read
- """
- pass
- #end post_floating_ip_pool_read
-
- def pre_floating_ip_pool_update(self, resource_id, resource_dict):
- """
- Method called before floating-ip-pool is updated
- """
- pass
- #end pre_floating_ip_pool_update
-
- def post_floating_ip_pool_update(self, resource_id, resource_dict):
- """
- Method called after floating-ip-pool is updated
- """
- pass
- #end post_floating_ip_pool_update
-
- def pre_floating_ip_pool_delete(self, resource_id):
- """
- Method called before floating-ip-pool is deleted
- """
- pass
- #end pre_floating_ip_pool_delete
-
- def post_floating_ip_pool_delete(self, resource_id, resource_dict):
- """
- Method called after floating-ip-pool is deleted
- """
- pass
- #end post_floating_ip_pool_delete
-
- def pre_physical_router_create(self, resource_dict):
- """
- Method called before physical-router is created
- """
- pass
- #end pre_physical_router_create
-
- def post_physical_router_create(self, resource_dict):
- """
- Method called after physical-router is created
- """
- pass
- #end post_physical_router_create
-
- def pre_physical_router_read(self, resource_id):
- """
- Method called before physical-router is read
- """
- pass
- #end pre_physical_router_read
-
- def post_physical_router_read(self, resource_id, resource_dict):
- """
- Method called after physical-router is read
- """
- pass
- #end post_physical_router_read
-
- def pre_physical_router_update(self, resource_id, resource_dict):
- """
- Method called before physical-router is updated
- """
- pass
- #end pre_physical_router_update
-
- def post_physical_router_update(self, resource_id, resource_dict):
- """
- Method called after physical-router is updated
- """
- pass
- #end post_physical_router_update
-
- def pre_physical_router_delete(self, resource_id):
- """
- Method called before physical-router is deleted
- """
- pass
- #end pre_physical_router_delete
-
- def post_physical_router_delete(self, resource_id, resource_dict):
- """
- Method called after physical-router is deleted
- """
- pass
- #end post_physical_router_delete
-
- def pre_bgp_router_create(self, resource_dict):
- """
- Method called before bgp-router is created
- """
- pass
- #end pre_bgp_router_create
-
- def post_bgp_router_create(self, resource_dict):
- """
- Method called after bgp-router is created
- """
- pass
- #end post_bgp_router_create
-
- def pre_bgp_router_read(self, resource_id):
- """
- Method called before bgp-router is read
- """
- pass
- #end pre_bgp_router_read
-
- def post_bgp_router_read(self, resource_id, resource_dict):
- """
- Method called after bgp-router is read
- """
- pass
- #end post_bgp_router_read
-
- def pre_bgp_router_update(self, resource_id, resource_dict):
- """
- Method called before bgp-router is updated
- """
- pass
- #end pre_bgp_router_update
-
- def post_bgp_router_update(self, resource_id, resource_dict):
- """
- Method called after bgp-router is updated
- """
- pass
- #end post_bgp_router_update
-
- def pre_bgp_router_delete(self, resource_id):
- """
- Method called before bgp-router is deleted
- """
- pass
- #end pre_bgp_router_delete
-
- def post_bgp_router_delete(self, resource_id, resource_dict):
- """
- Method called after bgp-router is deleted
- """
- pass
- #end post_bgp_router_delete
-
- def pre_virtual_router_create(self, resource_dict):
- """
- Method called before virtual-router is created
- """
- pass
- #end pre_virtual_router_create
-
- def post_virtual_router_create(self, resource_dict):
- """
- Method called after virtual-router is created
- """
- pass
- #end post_virtual_router_create
-
- def pre_virtual_router_read(self, resource_id):
- """
- Method called before virtual-router is read
- """
- pass
- #end pre_virtual_router_read
-
- def post_virtual_router_read(self, resource_id, resource_dict):
- """
- Method called after virtual-router is read
- """
- pass
- #end post_virtual_router_read
-
- def pre_virtual_router_update(self, resource_id, resource_dict):
- """
- Method called before virtual-router is updated
- """
- pass
- #end pre_virtual_router_update
-
- def post_virtual_router_update(self, resource_id, resource_dict):
- """
- Method called after virtual-router is updated
- """
- pass
- #end post_virtual_router_update
-
- def pre_virtual_router_delete(self, resource_id):
- """
- Method called before virtual-router is deleted
- """
- pass
- #end pre_virtual_router_delete
-
- def post_virtual_router_delete(self, resource_id, resource_dict):
- """
- Method called after virtual-router is deleted
- """
- pass
- #end post_virtual_router_delete
-
- def pre_config_root_create(self, resource_dict):
- """
- Method called before config-root is created
- """
- pass
- #end pre_config_root_create
-
- def post_config_root_create(self, resource_dict):
- """
- Method called after config-root is created
- """
- pass
- #end post_config_root_create
-
- def pre_config_root_read(self, resource_id):
- """
- Method called before config-root is read
- """
- pass
- #end pre_config_root_read
-
- def post_config_root_read(self, resource_id, resource_dict):
- """
- Method called after config-root is read
- """
- pass
- #end post_config_root_read
-
- def pre_config_root_update(self, resource_id, resource_dict):
- """
- Method called before config-root is updated
- """
- pass
- #end pre_config_root_update
-
- def post_config_root_update(self, resource_id, resource_dict):
- """
- Method called after config-root is updated
- """
- pass
- #end post_config_root_update
-
- def pre_config_root_delete(self, resource_id):
- """
- Method called before config-root is deleted
- """
- pass
- #end pre_config_root_delete
-
- def post_config_root_delete(self, resource_id, resource_dict):
- """
- Method called after config-root is deleted
- """
- pass
- #end post_config_root_delete
-
- def pre_subnet_create(self, resource_dict):
- """
- Method called before subnet is created
- """
- pass
- #end pre_subnet_create
-
- def post_subnet_create(self, resource_dict):
- """
- Method called after subnet is created
- """
- pass
- #end post_subnet_create
-
- def pre_subnet_read(self, resource_id):
- """
- Method called before subnet is read
- """
- pass
- #end pre_subnet_read
-
- def post_subnet_read(self, resource_id, resource_dict):
- """
- Method called after subnet is read
- """
- pass
- #end post_subnet_read
-
- def pre_subnet_update(self, resource_id, resource_dict):
- """
- Method called before subnet is updated
- """
- pass
- #end pre_subnet_update
-
- def post_subnet_update(self, resource_id, resource_dict):
- """
- Method called after subnet is updated
- """
- pass
- #end post_subnet_update
-
- def pre_subnet_delete(self, resource_id):
- """
- Method called before subnet is deleted
- """
- pass
- #end pre_subnet_delete
-
- def post_subnet_delete(self, resource_id, resource_dict):
- """
- Method called after subnet is deleted
- """
- pass
- #end post_subnet_delete
-
- def pre_global_system_config_create(self, resource_dict):
- """
- Method called before global-system-config is created
- """
- pass
- #end pre_global_system_config_create
-
- def post_global_system_config_create(self, resource_dict):
- """
- Method called after global-system-config is created
- """
- pass
- #end post_global_system_config_create
-
- def pre_global_system_config_read(self, resource_id):
- """
- Method called before global-system-config is read
- """
- pass
- #end pre_global_system_config_read
-
- def post_global_system_config_read(self, resource_id, resource_dict):
- """
- Method called after global-system-config is read
- """
- pass
- #end post_global_system_config_read
-
- def pre_global_system_config_update(self, resource_id, resource_dict):
- """
- Method called before global-system-config is updated
- """
- pass
- #end pre_global_system_config_update
-
- def post_global_system_config_update(self, resource_id, resource_dict):
- """
- Method called after global-system-config is updated
- """
- pass
- #end post_global_system_config_update
-
- def pre_global_system_config_delete(self, resource_id):
- """
- Method called before global-system-config is deleted
- """
- pass
- #end pre_global_system_config_delete
-
- def post_global_system_config_delete(self, resource_id, resource_dict):
- """
- Method called after global-system-config is deleted
- """
- pass
- #end post_global_system_config_delete
-
- def pre_service_appliance_create(self, resource_dict):
- """
- Method called before service-appliance is created
- """
- pass
- #end pre_service_appliance_create
-
- def post_service_appliance_create(self, resource_dict):
- """
- Method called after service-appliance is created
- """
- pass
- #end post_service_appliance_create
-
- def pre_service_appliance_read(self, resource_id):
- """
- Method called before service-appliance is read
- """
- pass
- #end pre_service_appliance_read
-
- def post_service_appliance_read(self, resource_id, resource_dict):
- """
- Method called after service-appliance is read
- """
- pass
- #end post_service_appliance_read
-
- def pre_service_appliance_update(self, resource_id, resource_dict):
- """
- Method called before service-appliance is updated
- """
- pass
- #end pre_service_appliance_update
-
- def post_service_appliance_update(self, resource_id, resource_dict):
- """
- Method called after service-appliance is updated
- """
- pass
- #end post_service_appliance_update
-
- def pre_service_appliance_delete(self, resource_id):
- """
- Method called before service-appliance is deleted
- """
- pass
- #end pre_service_appliance_delete
-
- def post_service_appliance_delete(self, resource_id, resource_dict):
- """
- Method called after service-appliance is deleted
- """
- pass
- #end post_service_appliance_delete
-
- def pre_service_instance_create(self, resource_dict):
- """
- Method called before service-instance is created
- """
- pass
- #end pre_service_instance_create
-
- def post_service_instance_create(self, resource_dict):
- """
- Method called after service-instance is created
- """
- pass
- #end post_service_instance_create
-
- def pre_service_instance_read(self, resource_id):
- """
- Method called before service-instance is read
- """
- pass
- #end pre_service_instance_read
-
- def post_service_instance_read(self, resource_id, resource_dict):
- """
- Method called after service-instance is read
- """
- pass
- #end post_service_instance_read
-
- def pre_service_instance_update(self, resource_id, resource_dict):
- """
- Method called before service-instance is updated
- """
- pass
- #end pre_service_instance_update
-
- def post_service_instance_update(self, resource_id, resource_dict):
- """
- Method called after service-instance is updated
- """
- pass
- #end post_service_instance_update
-
- def pre_service_instance_delete(self, resource_id):
- """
- Method called before service-instance is deleted
- """
- pass
- #end pre_service_instance_delete
-
- def post_service_instance_delete(self, resource_id, resource_dict):
- """
- Method called after service-instance is deleted
- """
- pass
- #end post_service_instance_delete
-
- def pre_namespace_create(self, resource_dict):
- """
- Method called before namespace is created
- """
- pass
- #end pre_namespace_create
-
- def post_namespace_create(self, resource_dict):
- """
- Method called after namespace is created
- """
- pass
- #end post_namespace_create
-
- def pre_namespace_read(self, resource_id):
- """
- Method called before namespace is read
- """
- pass
- #end pre_namespace_read
-
- def post_namespace_read(self, resource_id, resource_dict):
- """
- Method called after namespace is read
- """
- pass
- #end post_namespace_read
-
- def pre_namespace_update(self, resource_id, resource_dict):
- """
- Method called before namespace is updated
- """
- pass
- #end pre_namespace_update
-
- def post_namespace_update(self, resource_id, resource_dict):
- """
- Method called after namespace is updated
- """
- pass
- #end post_namespace_update
-
- def pre_namespace_delete(self, resource_id):
- """
- Method called before namespace is deleted
- """
- pass
- #end pre_namespace_delete
-
- def post_namespace_delete(self, resource_id, resource_dict):
- """
- Method called after namespace is deleted
- """
- pass
- #end post_namespace_delete
-
- def pre_logical_interface_create(self, resource_dict):
- """
- Method called before logical-interface is created
- """
- pass
- #end pre_logical_interface_create
-
- def post_logical_interface_create(self, resource_dict):
- """
- Method called after logical-interface is created
- """
- pass
- #end post_logical_interface_create
-
- def pre_logical_interface_read(self, resource_id):
- """
- Method called before logical-interface is read
- """
- pass
- #end pre_logical_interface_read
-
- def post_logical_interface_read(self, resource_id, resource_dict):
- """
- Method called after logical-interface is read
- """
- pass
- #end post_logical_interface_read
-
- def pre_logical_interface_update(self, resource_id, resource_dict):
- """
- Method called before logical-interface is updated
- """
- pass
- #end pre_logical_interface_update
-
- def post_logical_interface_update(self, resource_id, resource_dict):
- """
- Method called after logical-interface is updated
- """
- pass
- #end post_logical_interface_update
-
- def pre_logical_interface_delete(self, resource_id):
- """
- Method called before logical-interface is deleted
- """
- pass
- #end pre_logical_interface_delete
-
- def post_logical_interface_delete(self, resource_id, resource_dict):
- """
- Method called after logical-interface is deleted
- """
- pass
- #end post_logical_interface_delete
-
- def pre_route_table_create(self, resource_dict):
- """
- Method called before route-table is created
- """
- pass
- #end pre_route_table_create
-
- def post_route_table_create(self, resource_dict):
- """
- Method called after route-table is created
- """
- pass
- #end post_route_table_create
-
- def pre_route_table_read(self, resource_id):
- """
- Method called before route-table is read
- """
- pass
- #end pre_route_table_read
-
- def post_route_table_read(self, resource_id, resource_dict):
- """
- Method called after route-table is read
- """
- pass
- #end post_route_table_read
-
- def pre_route_table_update(self, resource_id, resource_dict):
- """
- Method called before route-table is updated
- """
- pass
- #end pre_route_table_update
-
- def post_route_table_update(self, resource_id, resource_dict):
- """
- Method called after route-table is updated
- """
- pass
- #end post_route_table_update
-
- def pre_route_table_delete(self, resource_id):
- """
- Method called before route-table is deleted
- """
- pass
- #end pre_route_table_delete
-
- def post_route_table_delete(self, resource_id, resource_dict):
- """
- Method called after route-table is deleted
- """
- pass
- #end post_route_table_delete
-
- def pre_physical_interface_create(self, resource_dict):
- """
- Method called before physical-interface is created
- """
- pass
- #end pre_physical_interface_create
-
- def post_physical_interface_create(self, resource_dict):
- """
- Method called after physical-interface is created
- """
- pass
- #end post_physical_interface_create
-
- def pre_physical_interface_read(self, resource_id):
- """
- Method called before physical-interface is read
- """
- pass
- #end pre_physical_interface_read
-
- def post_physical_interface_read(self, resource_id, resource_dict):
- """
- Method called after physical-interface is read
- """
- pass
- #end post_physical_interface_read
-
- def pre_physical_interface_update(self, resource_id, resource_dict):
- """
- Method called before physical-interface is updated
- """
- pass
- #end pre_physical_interface_update
-
- def post_physical_interface_update(self, resource_id, resource_dict):
- """
- Method called after physical-interface is updated
- """
- pass
- #end post_physical_interface_update
-
- def pre_physical_interface_delete(self, resource_id):
- """
- Method called before physical-interface is deleted
- """
- pass
- #end pre_physical_interface_delete
-
- def post_physical_interface_delete(self, resource_id, resource_dict):
- """
- Method called after physical-interface is deleted
- """
- pass
- #end post_physical_interface_delete
-
- def pre_access_control_list_create(self, resource_dict):
- """
- Method called before access-control-list is created
- """
- pass
- #end pre_access_control_list_create
-
- def post_access_control_list_create(self, resource_dict):
- """
- Method called after access-control-list is created
- """
- pass
- #end post_access_control_list_create
-
- def pre_access_control_list_read(self, resource_id):
- """
- Method called before access-control-list is read
- """
- pass
- #end pre_access_control_list_read
-
- def post_access_control_list_read(self, resource_id, resource_dict):
- """
- Method called after access-control-list is read
- """
- pass
- #end post_access_control_list_read
-
- def pre_access_control_list_update(self, resource_id, resource_dict):
- """
- Method called before access-control-list is updated
- """
- pass
- #end pre_access_control_list_update
-
- def post_access_control_list_update(self, resource_id, resource_dict):
- """
- Method called after access-control-list is updated
- """
- pass
- #end post_access_control_list_update
-
- def pre_access_control_list_delete(self, resource_id):
- """
- Method called before access-control-list is deleted
- """
- pass
- #end pre_access_control_list_delete
-
- def post_access_control_list_delete(self, resource_id, resource_dict):
- """
- Method called after access-control-list is deleted
- """
- pass
- #end post_access_control_list_delete
-
- def pre_analytics_node_create(self, resource_dict):
- """
- Method called before analytics-node is created
- """
- pass
- #end pre_analytics_node_create
-
- def post_analytics_node_create(self, resource_dict):
- """
- Method called after analytics-node is created
- """
- pass
- #end post_analytics_node_create
-
- def pre_analytics_node_read(self, resource_id):
- """
- Method called before analytics-node is read
- """
- pass
- #end pre_analytics_node_read
-
- def post_analytics_node_read(self, resource_id, resource_dict):
- """
- Method called after analytics-node is read
- """
- pass
- #end post_analytics_node_read
-
- def pre_analytics_node_update(self, resource_id, resource_dict):
- """
- Method called before analytics-node is updated
- """
- pass
- #end pre_analytics_node_update
-
- def post_analytics_node_update(self, resource_id, resource_dict):
- """
- Method called after analytics-node is updated
- """
- pass
- #end post_analytics_node_update
-
- def pre_analytics_node_delete(self, resource_id):
- """
- Method called before analytics-node is deleted
- """
- pass
- #end pre_analytics_node_delete
-
- def post_analytics_node_delete(self, resource_id, resource_dict):
- """
- Method called after analytics-node is deleted
- """
- pass
- #end post_analytics_node_delete
-
- def pre_virtual_DNS_create(self, resource_dict):
- """
- Method called before virtual-DNS is created
- """
- pass
- #end pre_virtual_DNS_create
-
- def post_virtual_DNS_create(self, resource_dict):
- """
- Method called after virtual-DNS is created
- """
- pass
- #end post_virtual_DNS_create
-
- def pre_virtual_DNS_read(self, resource_id):
- """
- Method called before virtual-DNS is read
- """
- pass
- #end pre_virtual_DNS_read
-
- def post_virtual_DNS_read(self, resource_id, resource_dict):
- """
- Method called after virtual-DNS is read
- """
- pass
- #end post_virtual_DNS_read
-
- def pre_virtual_DNS_update(self, resource_id, resource_dict):
- """
- Method called before virtual-DNS is updated
- """
- pass
- #end pre_virtual_DNS_update
-
- def post_virtual_DNS_update(self, resource_id, resource_dict):
- """
- Method called after virtual-DNS is updated
- """
- pass
- #end post_virtual_DNS_update
-
- def pre_virtual_DNS_delete(self, resource_id):
- """
- Method called before virtual-DNS is deleted
- """
- pass
- #end pre_virtual_DNS_delete
-
- def post_virtual_DNS_delete(self, resource_id, resource_dict):
- """
- Method called after virtual-DNS is deleted
- """
- pass
- #end post_virtual_DNS_delete
-
- def pre_customer_attachment_create(self, resource_dict):
- """
- Method called before customer-attachment is created
- """
- pass
- #end pre_customer_attachment_create
-
- def post_customer_attachment_create(self, resource_dict):
- """
- Method called after customer-attachment is created
- """
- pass
- #end post_customer_attachment_create
-
- def pre_customer_attachment_read(self, resource_id):
- """
- Method called before customer-attachment is read
- """
- pass
- #end pre_customer_attachment_read
-
- def post_customer_attachment_read(self, resource_id, resource_dict):
- """
- Method called after customer-attachment is read
- """
- pass
- #end post_customer_attachment_read
-
- def pre_customer_attachment_update(self, resource_id, resource_dict):
- """
- Method called before customer-attachment is updated
- """
- pass
- #end pre_customer_attachment_update
-
- def post_customer_attachment_update(self, resource_id, resource_dict):
- """
- Method called after customer-attachment is updated
- """
- pass
- #end post_customer_attachment_update
-
- def pre_customer_attachment_delete(self, resource_id):
- """
- Method called before customer-attachment is deleted
- """
- pass
- #end pre_customer_attachment_delete
-
- def post_customer_attachment_delete(self, resource_id, resource_dict):
- """
- Method called after customer-attachment is deleted
- """
- pass
- #end post_customer_attachment_delete
-
- def pre_service_appliance_set_create(self, resource_dict):
- """
- Method called before service-appliance-set is created
- """
- pass
- #end pre_service_appliance_set_create
-
- def post_service_appliance_set_create(self, resource_dict):
- """
- Method called after service-appliance-set is created
- """
- pass
- #end post_service_appliance_set_create
-
- def pre_service_appliance_set_read(self, resource_id):
- """
- Method called before service-appliance-set is read
- """
- pass
- #end pre_service_appliance_set_read
-
- def post_service_appliance_set_read(self, resource_id, resource_dict):
- """
- Method called after service-appliance-set is read
- """
- pass
- #end post_service_appliance_set_read
-
- def pre_service_appliance_set_update(self, resource_id, resource_dict):
- """
- Method called before service-appliance-set is updated
- """
- pass
- #end pre_service_appliance_set_update
-
- def post_service_appliance_set_update(self, resource_id, resource_dict):
- """
- Method called after service-appliance-set is updated
- """
- pass
- #end post_service_appliance_set_update
-
- def pre_service_appliance_set_delete(self, resource_id):
- """
- Method called before service-appliance-set is deleted
- """
- pass
- #end pre_service_appliance_set_delete
-
- def post_service_appliance_set_delete(self, resource_id, resource_dict):
- """
- Method called after service-appliance-set is deleted
- """
- pass
- #end post_service_appliance_set_delete
-
- def pre_config_node_create(self, resource_dict):
- """
- Method called before config-node is created
- """
- pass
- #end pre_config_node_create
-
- def post_config_node_create(self, resource_dict):
- """
- Method called after config-node is created
- """
- pass
- #end post_config_node_create
-
- def pre_config_node_read(self, resource_id):
- """
- Method called before config-node is read
- """
- pass
- #end pre_config_node_read
-
- def post_config_node_read(self, resource_id, resource_dict):
- """
- Method called after config-node is read
- """
- pass
- #end post_config_node_read
-
- def pre_config_node_update(self, resource_id, resource_dict):
- """
- Method called before config-node is updated
- """
- pass
- #end pre_config_node_update
-
- def post_config_node_update(self, resource_id, resource_dict):
- """
- Method called after config-node is updated
- """
- pass
- #end post_config_node_update
-
- def pre_config_node_delete(self, resource_id):
- """
- Method called before config-node is deleted
- """
- pass
- #end pre_config_node_delete
-
- def post_config_node_delete(self, resource_id, resource_dict):
- """
- Method called after config-node is deleted
- """
- pass
- #end post_config_node_delete
-
- def pre_qos_queue_create(self, resource_dict):
- """
- Method called before qos-queue is created
- """
- pass
- #end pre_qos_queue_create
-
- def post_qos_queue_create(self, resource_dict):
- """
- Method called after qos-queue is created
- """
- pass
- #end post_qos_queue_create
-
- def pre_qos_queue_read(self, resource_id):
- """
- Method called before qos-queue is read
- """
- pass
- #end pre_qos_queue_read
-
- def post_qos_queue_read(self, resource_id, resource_dict):
- """
- Method called after qos-queue is read
- """
- pass
- #end post_qos_queue_read
-
- def pre_qos_queue_update(self, resource_id, resource_dict):
- """
- Method called before qos-queue is updated
- """
- pass
- #end pre_qos_queue_update
-
- def post_qos_queue_update(self, resource_id, resource_dict):
- """
- Method called after qos-queue is updated
- """
- pass
- #end post_qos_queue_update
-
- def pre_qos_queue_delete(self, resource_id):
- """
- Method called before qos-queue is deleted
- """
- pass
- #end pre_qos_queue_delete
-
- def post_qos_queue_delete(self, resource_id, resource_dict):
- """
- Method called after qos-queue is deleted
- """
- pass
- #end post_qos_queue_delete
-
- def pre_virtual_machine_create(self, resource_dict):
- """
- Method called before virtual-machine is created
- """
- pass
- #end pre_virtual_machine_create
-
- def post_virtual_machine_create(self, resource_dict):
- """
- Method called after virtual-machine is created
- """
- pass
- #end post_virtual_machine_create
-
- def pre_virtual_machine_read(self, resource_id):
- """
- Method called before virtual-machine is read
- """
- pass
- #end pre_virtual_machine_read
-
- def post_virtual_machine_read(self, resource_id, resource_dict):
- """
- Method called after virtual-machine is read
- """
- pass
- #end post_virtual_machine_read
-
- def pre_virtual_machine_update(self, resource_id, resource_dict):
- """
- Method called before virtual-machine is updated
- """
- pass
- #end pre_virtual_machine_update
-
- def post_virtual_machine_update(self, resource_id, resource_dict):
- """
- Method called after virtual-machine is updated
- """
- pass
- #end post_virtual_machine_update
-
- def pre_virtual_machine_delete(self, resource_id):
- """
- Method called before virtual-machine is deleted
- """
- pass
- #end pre_virtual_machine_delete
-
- def post_virtual_machine_delete(self, resource_id, resource_dict):
- """
- Method called after virtual-machine is deleted
- """
- pass
- #end post_virtual_machine_delete
-
- def pre_interface_route_table_create(self, resource_dict):
- """
- Method called before interface-route-table is created
- """
- pass
- #end pre_interface_route_table_create
-
- def post_interface_route_table_create(self, resource_dict):
- """
- Method called after interface-route-table is created
- """
- pass
- #end post_interface_route_table_create
-
- def pre_interface_route_table_read(self, resource_id):
- """
- Method called before interface-route-table is read
- """
- pass
- #end pre_interface_route_table_read
-
- def post_interface_route_table_read(self, resource_id, resource_dict):
- """
- Method called after interface-route-table is read
- """
- pass
- #end post_interface_route_table_read
-
- def pre_interface_route_table_update(self, resource_id, resource_dict):
- """
- Method called before interface-route-table is updated
- """
- pass
- #end pre_interface_route_table_update
-
- def post_interface_route_table_update(self, resource_id, resource_dict):
- """
- Method called after interface-route-table is updated
- """
- pass
- #end post_interface_route_table_update
-
- def pre_interface_route_table_delete(self, resource_id):
- """
- Method called before interface-route-table is deleted
- """
- pass
- #end pre_interface_route_table_delete
-
- def post_interface_route_table_delete(self, resource_id, resource_dict):
- """
- Method called after interface-route-table is deleted
- """
- pass
- #end post_interface_route_table_delete
-
- def pre_service_template_create(self, resource_dict):
- """
- Method called before service-template is created
- """
- pass
- #end pre_service_template_create
-
- def post_service_template_create(self, resource_dict):
- """
- Method called after service-template is created
- """
- pass
- #end post_service_template_create
-
- def pre_service_template_read(self, resource_id):
- """
- Method called before service-template is read
- """
- pass
- #end pre_service_template_read
-
- def post_service_template_read(self, resource_id, resource_dict):
- """
- Method called after service-template is read
- """
- pass
- #end post_service_template_read
-
- def pre_service_template_update(self, resource_id, resource_dict):
- """
- Method called before service-template is updated
- """
- pass
- #end pre_service_template_update
-
- def post_service_template_update(self, resource_id, resource_dict):
- """
- Method called after service-template is updated
- """
- pass
- #end post_service_template_update
-
- def pre_service_template_delete(self, resource_id):
- """
- Method called before service-template is deleted
- """
- pass
- #end pre_service_template_delete
-
- def post_service_template_delete(self, resource_id, resource_dict):
- """
- Method called after service-template is deleted
- """
- pass
- #end post_service_template_delete
-
- def pre_virtual_ip_create(self, resource_dict):
- """
- Method called before virtual-ip is created
- """
- pass
- #end pre_virtual_ip_create
-
- def post_virtual_ip_create(self, resource_dict):
- """
- Method called after virtual-ip is created
- """
- pass
- #end post_virtual_ip_create
-
- def pre_virtual_ip_read(self, resource_id):
- """
- Method called before virtual-ip is read
- """
- pass
- #end pre_virtual_ip_read
-
- def post_virtual_ip_read(self, resource_id, resource_dict):
- """
- Method called after virtual-ip is read
- """
- pass
- #end post_virtual_ip_read
-
- def pre_virtual_ip_update(self, resource_id, resource_dict):
- """
- Method called before virtual-ip is updated
- """
- pass
- #end pre_virtual_ip_update
-
- def post_virtual_ip_update(self, resource_id, resource_dict):
- """
- Method called after virtual-ip is updated
- """
- pass
- #end post_virtual_ip_update
-
- def pre_virtual_ip_delete(self, resource_id):
- """
- Method called before virtual-ip is deleted
- """
- pass
- #end pre_virtual_ip_delete
-
- def post_virtual_ip_delete(self, resource_id, resource_dict):
- """
- Method called after virtual-ip is deleted
- """
- pass
- #end post_virtual_ip_delete
-
- def pre_loadbalancer_member_create(self, resource_dict):
- """
- Method called before loadbalancer-member is created
- """
- pass
- #end pre_loadbalancer_member_create
-
- def post_loadbalancer_member_create(self, resource_dict):
- """
- Method called after loadbalancer-member is created
- """
- pass
- #end post_loadbalancer_member_create
-
- def pre_loadbalancer_member_read(self, resource_id):
- """
- Method called before loadbalancer-member is read
- """
- pass
- #end pre_loadbalancer_member_read
-
- def post_loadbalancer_member_read(self, resource_id, resource_dict):
- """
- Method called after loadbalancer-member is read
- """
- pass
- #end post_loadbalancer_member_read
-
- def pre_loadbalancer_member_update(self, resource_id, resource_dict):
- """
- Method called before loadbalancer-member is updated
- """
- pass
- #end pre_loadbalancer_member_update
-
- def post_loadbalancer_member_update(self, resource_id, resource_dict):
- """
- Method called after loadbalancer-member is updated
- """
- pass
- #end post_loadbalancer_member_update
-
- def pre_loadbalancer_member_delete(self, resource_id):
- """
- Method called before loadbalancer-member is deleted
- """
- pass
- #end pre_loadbalancer_member_delete
-
- def post_loadbalancer_member_delete(self, resource_id, resource_dict):
- """
- Method called after loadbalancer-member is deleted
- """
- pass
- #end post_loadbalancer_member_delete
-
- def pre_security_group_create(self, resource_dict):
- """
- Method called before security-group is created
- """
- pass
- #end pre_security_group_create
-
- def post_security_group_create(self, resource_dict):
- """
- Method called after security-group is created
- """
- pass
- #end post_security_group_create
-
- def pre_security_group_read(self, resource_id):
- """
- Method called before security-group is read
- """
- pass
- #end pre_security_group_read
-
- def post_security_group_read(self, resource_id, resource_dict):
- """
- Method called after security-group is read
- """
- pass
- #end post_security_group_read
-
- def pre_security_group_update(self, resource_id, resource_dict):
- """
- Method called before security-group is updated
- """
- pass
- #end pre_security_group_update
-
- def post_security_group_update(self, resource_id, resource_dict):
- """
- Method called after security-group is updated
- """
- pass
- #end post_security_group_update
-
- def pre_security_group_delete(self, resource_id):
- """
- Method called before security-group is deleted
- """
- pass
- #end pre_security_group_delete
-
- def post_security_group_delete(self, resource_id, resource_dict):
- """
- Method called after security-group is deleted
- """
- pass
- #end post_security_group_delete
-
- def pre_provider_attachment_create(self, resource_dict):
- """
- Method called before provider-attachment is created
- """
- pass
- #end pre_provider_attachment_create
-
- def post_provider_attachment_create(self, resource_dict):
- """
- Method called after provider-attachment is created
- """
- pass
- #end post_provider_attachment_create
-
- def pre_provider_attachment_read(self, resource_id):
- """
- Method called before provider-attachment is read
- """
- pass
- #end pre_provider_attachment_read
-
- def post_provider_attachment_read(self, resource_id, resource_dict):
- """
- Method called after provider-attachment is read
- """
- pass
- #end post_provider_attachment_read
-
- def pre_provider_attachment_update(self, resource_id, resource_dict):
- """
- Method called before provider-attachment is updated
- """
- pass
- #end pre_provider_attachment_update
-
- def post_provider_attachment_update(self, resource_id, resource_dict):
- """
- Method called after provider-attachment is updated
- """
- pass
- #end post_provider_attachment_update
-
- def pre_provider_attachment_delete(self, resource_id):
- """
- Method called before provider-attachment is deleted
- """
- pass
- #end pre_provider_attachment_delete
-
- def post_provider_attachment_delete(self, resource_id, resource_dict):
- """
- Method called after provider-attachment is deleted
- """
- pass
- #end post_provider_attachment_delete
-
- def pre_virtual_machine_interface_create(self, resource_dict):
- """
- Method called before virtual-machine-interface is created
- """
- pass
- #end pre_virtual_machine_interface_create
-
- def post_virtual_machine_interface_create(self, resource_dict):
- """
- Method called after virtual-machine-interface is created
- """
- pass
- #end post_virtual_machine_interface_create
-
- def pre_virtual_machine_interface_read(self, resource_id):
- """
- Method called before virtual-machine-interface is read
- """
- pass
- #end pre_virtual_machine_interface_read
-
- def post_virtual_machine_interface_read(self, resource_id, resource_dict):
- """
- Method called after virtual-machine-interface is read
- """
- pass
- #end post_virtual_machine_interface_read
-
- def pre_virtual_machine_interface_update(self, resource_id, resource_dict):
- """
- Method called before virtual-machine-interface is updated
- """
- pass
- #end pre_virtual_machine_interface_update
-
- def post_virtual_machine_interface_update(self, resource_id, resource_dict):
- """
- Method called after virtual-machine-interface is updated
- """
- pass
- #end post_virtual_machine_interface_update
-
- def pre_virtual_machine_interface_delete(self, resource_id):
- """
- Method called before virtual-machine-interface is deleted
- """
- pass
- #end pre_virtual_machine_interface_delete
-
- def post_virtual_machine_interface_delete(self, resource_id, resource_dict):
- """
- Method called after virtual-machine-interface is deleted
- """
- pass
- #end post_virtual_machine_interface_delete
-
- def pre_loadbalancer_healthmonitor_create(self, resource_dict):
- """
- Method called before loadbalancer-healthmonitor is created
- """
- pass
- #end pre_loadbalancer_healthmonitor_create
-
- def post_loadbalancer_healthmonitor_create(self, resource_dict):
- """
- Method called after loadbalancer-healthmonitor is created
- """
- pass
- #end post_loadbalancer_healthmonitor_create
-
- def pre_loadbalancer_healthmonitor_read(self, resource_id):
- """
- Method called before loadbalancer-healthmonitor is read
- """
- pass
- #end pre_loadbalancer_healthmonitor_read
-
- def post_loadbalancer_healthmonitor_read(self, resource_id, resource_dict):
- """
- Method called after loadbalancer-healthmonitor is read
- """
- pass
- #end post_loadbalancer_healthmonitor_read
-
- def pre_loadbalancer_healthmonitor_update(self, resource_id, resource_dict):
- """
- Method called before loadbalancer-healthmonitor is updated
- """
- pass
- #end pre_loadbalancer_healthmonitor_update
-
- def post_loadbalancer_healthmonitor_update(self, resource_id, resource_dict):
- """
- Method called after loadbalancer-healthmonitor is updated
- """
- pass
- #end post_loadbalancer_healthmonitor_update
-
- def pre_loadbalancer_healthmonitor_delete(self, resource_id):
- """
- Method called before loadbalancer-healthmonitor is deleted
- """
- pass
- #end pre_loadbalancer_healthmonitor_delete
-
- def post_loadbalancer_healthmonitor_delete(self, resource_id, resource_dict):
- """
- Method called after loadbalancer-healthmonitor is deleted
- """
- pass
- #end post_loadbalancer_healthmonitor_delete
-
- def pre_virtual_network_create(self, resource_dict):
- """
- Method called before virtual-network is created
- """
- pass
- #end pre_virtual_network_create
-
- def post_virtual_network_create(self, resource_dict):
- """
- Method called after virtual-network is created
- """
- pass
- #end post_virtual_network_create
-
- def pre_virtual_network_read(self, resource_id):
- """
- Method called before virtual-network is read
- """
- pass
- #end pre_virtual_network_read
-
- def post_virtual_network_read(self, resource_id, resource_dict):
- """
- Method called after virtual-network is read
- """
- pass
- #end post_virtual_network_read
-
- def pre_virtual_network_update(self, resource_id, resource_dict):
- """
- Method called before virtual-network is updated
- """
- pass
- #end pre_virtual_network_update
-
- def post_virtual_network_update(self, resource_id, resource_dict):
- """
- Method called after virtual-network is updated
- """
- pass
- #end post_virtual_network_update
-
- def pre_virtual_network_delete(self, resource_id):
- """
- Method called before virtual-network is deleted
- """
- pass
- #end pre_virtual_network_delete
-
- def post_virtual_network_delete(self, resource_id, resource_dict):
- """
- Method called after virtual-network is deleted
- """
- pass
- #end post_virtual_network_delete
-
- def pre_project_create(self, resource_dict):
- """
- Method called before project is created
- """
- pass
- #end pre_project_create
-
- def post_project_create(self, resource_dict):
- """
- Method called after project is created
- """
- pass
- #end post_project_create
-
- def pre_project_read(self, resource_id):
- """
- Method called before project is read
- """
- pass
- #end pre_project_read
-
- def post_project_read(self, resource_id, resource_dict):
- """
- Method called after project is read
- """
- pass
- #end post_project_read
-
- def pre_project_update(self, resource_id, resource_dict):
- """
- Method called before project is updated
- """
- pass
- #end pre_project_update
-
- def post_project_update(self, resource_id, resource_dict):
- """
- Method called after project is updated
- """
- pass
- #end post_project_update
-
- def pre_project_delete(self, resource_id):
- """
- Method called before project is deleted
- """
- pass
- #end pre_project_delete
-
- def post_project_delete(self, resource_id, resource_dict):
- """
- Method called after project is deleted
- """
- pass
- #end post_project_delete
-
- def pre_qos_forwarding_class_create(self, resource_dict):
- """
- Method called before qos-forwarding-class is created
- """
- pass
- #end pre_qos_forwarding_class_create
-
- def post_qos_forwarding_class_create(self, resource_dict):
- """
- Method called after qos-forwarding-class is created
- """
- pass
- #end post_qos_forwarding_class_create
-
- def pre_qos_forwarding_class_read(self, resource_id):
- """
- Method called before qos-forwarding-class is read
- """
- pass
- #end pre_qos_forwarding_class_read
-
- def post_qos_forwarding_class_read(self, resource_id, resource_dict):
- """
- Method called after qos-forwarding-class is read
- """
- pass
- #end post_qos_forwarding_class_read
-
- def pre_qos_forwarding_class_update(self, resource_id, resource_dict):
- """
- Method called before qos-forwarding-class is updated
- """
- pass
- #end pre_qos_forwarding_class_update
-
- def post_qos_forwarding_class_update(self, resource_id, resource_dict):
- """
- Method called after qos-forwarding-class is updated
- """
- pass
- #end post_qos_forwarding_class_update
-
- def pre_qos_forwarding_class_delete(self, resource_id):
- """
- Method called before qos-forwarding-class is deleted
- """
- pass
- #end pre_qos_forwarding_class_delete
-
- def post_qos_forwarding_class_delete(self, resource_id, resource_dict):
- """
- Method called after qos-forwarding-class is deleted
- """
- pass
- #end post_qos_forwarding_class_delete
-
- def pre_database_node_create(self, resource_dict):
- """
- Method called before database-node is created
- """
- pass
- #end pre_database_node_create
-
- def post_database_node_create(self, resource_dict):
- """
- Method called after database-node is created
- """
- pass
- #end post_database_node_create
-
- def pre_database_node_read(self, resource_id):
- """
- Method called before database-node is read
- """
- pass
- #end pre_database_node_read
-
- def post_database_node_read(self, resource_id, resource_dict):
- """
- Method called after database-node is read
- """
- pass
- #end post_database_node_read
-
- def pre_database_node_update(self, resource_id, resource_dict):
- """
- Method called before database-node is updated
- """
- pass
- #end pre_database_node_update
-
- def post_database_node_update(self, resource_id, resource_dict):
- """
- Method called after database-node is updated
- """
- pass
- #end post_database_node_update
-
- def pre_database_node_delete(self, resource_id):
- """
- Method called before database-node is deleted
- """
- pass
- #end pre_database_node_delete
-
- def post_database_node_delete(self, resource_id, resource_dict):
- """
- Method called after database-node is deleted
- """
- pass
- #end post_database_node_delete
-
- def pre_routing_instance_create(self, resource_dict):
- """
- Method called before routing-instance is created
- """
- pass
- #end pre_routing_instance_create
-
- def post_routing_instance_create(self, resource_dict):
- """
- Method called after routing-instance is created
- """
- pass
- #end post_routing_instance_create
-
- def pre_routing_instance_read(self, resource_id):
- """
- Method called before routing-instance is read
- """
- pass
- #end pre_routing_instance_read
-
- def post_routing_instance_read(self, resource_id, resource_dict):
- """
- Method called after routing-instance is read
- """
- pass
- #end post_routing_instance_read
-
- def pre_routing_instance_update(self, resource_id, resource_dict):
- """
- Method called before routing-instance is updated
- """
- pass
- #end pre_routing_instance_update
-
- def post_routing_instance_update(self, resource_id, resource_dict):
- """
- Method called after routing-instance is updated
- """
- pass
- #end post_routing_instance_update
-
- def pre_routing_instance_delete(self, resource_id):
- """
- Method called before routing-instance is deleted
- """
- pass
- #end pre_routing_instance_delete
-
- def post_routing_instance_delete(self, resource_id, resource_dict):
- """
- Method called after routing-instance is deleted
- """
- pass
- #end post_routing_instance_delete
-
- def pre_network_ipam_create(self, resource_dict):
- """
- Method called before network-ipam is created
- """
- pass
- #end pre_network_ipam_create
-
- def post_network_ipam_create(self, resource_dict):
- """
- Method called after network-ipam is created
- """
- pass
- #end post_network_ipam_create
-
- def pre_network_ipam_read(self, resource_id):
- """
- Method called before network-ipam is read
- """
- pass
- #end pre_network_ipam_read
-
- def post_network_ipam_read(self, resource_id, resource_dict):
- """
- Method called after network-ipam is read
- """
- pass
- #end post_network_ipam_read
-
- def pre_network_ipam_update(self, resource_id, resource_dict):
- """
- Method called before network-ipam is updated
- """
- pass
- #end pre_network_ipam_update
-
- def post_network_ipam_update(self, resource_id, resource_dict):
- """
- Method called after network-ipam is updated
- """
- pass
- #end post_network_ipam_update
-
- def pre_network_ipam_delete(self, resource_id):
- """
- Method called before network-ipam is deleted
- """
- pass
- #end pre_network_ipam_delete
-
- def post_network_ipam_delete(self, resource_id, resource_dict):
- """
- Method called after network-ipam is deleted
- """
- pass
- #end post_network_ipam_delete
-
- def pre_logical_router_create(self, resource_dict):
- """
- Method called before logical-router is created
- """
- pass
- #end pre_logical_router_create
-
- def post_logical_router_create(self, resource_dict):
- """
- Method called after logical-router is created
- """
- pass
- #end post_logical_router_create
-
- def pre_logical_router_read(self, resource_id):
- """
- Method called before logical-router is read
- """
- pass
- #end pre_logical_router_read
-
- def post_logical_router_read(self, resource_id, resource_dict):
- """
- Method called after logical-router is read
- """
- pass
- #end post_logical_router_read
-
- def pre_logical_router_update(self, resource_id, resource_dict):
- """
- Method called before logical-router is updated
- """
- pass
- #end pre_logical_router_update
-
- def post_logical_router_update(self, resource_id, resource_dict):
- """
- Method called after logical-router is updated
- """
- pass
- #end post_logical_router_update
-
- def pre_logical_router_delete(self, resource_id):
- """
- Method called before logical-router is deleted
- """
- pass
- #end pre_logical_router_delete
-
- def post_logical_router_delete(self, resource_id, resource_dict):
- """
- Method called after logical-router is deleted
- """
- pass
- #end post_logical_router_delete
-
-#end class ResourceApiGen
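
The hunk above removes the tail of Testcases/vnc_api/gen/vnc_api_extension_gen.py: ResourceApiGen is a generated base class whose pre_*/post_* methods are deliberate no-ops, giving an API-server extension a hook around every resource type's create, read, update and delete. A minimal sketch of how such a class might be subclassed follows; the logging setup and the AuditingApiHooks name are illustrative assumptions, not part of the removed files, and the wiring that makes the API server invoke the hooks is not shown.

    # Hypothetical extension overriding two of the generated no-op hooks.
    # Every other pre_*/post_* method keeps the base-class behaviour (pass).
    import logging

    from vnc_api.gen.vnc_api_extension_gen import ResourceApiGen

    logger = logging.getLogger(__name__)

    class AuditingApiHooks(ResourceApiGen):
        def pre_virtual_network_create(self, resource_dict):
            # Called before a virtual-network is committed by the API server.
            logger.info("creating virtual-network %s", resource_dict.get('fq_name'))

        def post_virtual_network_delete(self, resource_id, resource_dict):
            # Called after a virtual-network has been removed.
            logger.info("deleted virtual-network %s", resource_id)
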
diff --git a/Testcases/vnc_api/gen/vnc_api_extension_gen.pyc b/Testcases/vnc_api/gen/vnc_api_extension_gen.pyc
deleted file mode 100644
index 0c1c77b..0000000
--- a/Testcases/vnc_api/gen/vnc_api_extension_gen.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/gen/vnc_api_schema.py b/Testcases/vnc_api/gen/vnc_api_schema.py
deleted file mode 100644
index 0da7fd6..0000000
--- a/Testcases/vnc_api/gen/vnc_api_schema.py
+++ /dev/null
@@ -1,192 +0,0 @@
-
-# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit!
-
-import pydot
-
-def generate_schema_graph():
- graph = pydot.Dot(graph_type='digraph')
-
- # Generate node definitions and add to graph
- domain_node = pydot.Node('domain')
- graph.add_node(domain_node)
- global_vrouter_config_node = pydot.Node('global-vrouter-config')
- graph.add_node(global_vrouter_config_node)
- instance_ip_node = pydot.Node('instance-ip')
- graph.add_node(instance_ip_node)
- network_policy_node = pydot.Node('network-policy')
- graph.add_node(network_policy_node)
- loadbalancer_pool_node = pydot.Node('loadbalancer-pool')
- graph.add_node(loadbalancer_pool_node)
- virtual_DNS_record_node = pydot.Node('virtual-DNS-record')
- graph.add_node(virtual_DNS_record_node)
- route_target_node = pydot.Node('route-target')
- graph.add_node(route_target_node)
- floating_ip_node = pydot.Node('floating-ip')
- graph.add_node(floating_ip_node)
- floating_ip_pool_node = pydot.Node('floating-ip-pool')
- graph.add_node(floating_ip_pool_node)
- physical_router_node = pydot.Node('physical-router')
- graph.add_node(physical_router_node)
- bgp_router_node = pydot.Node('bgp-router')
- graph.add_node(bgp_router_node)
- virtual_router_node = pydot.Node('virtual-router')
- graph.add_node(virtual_router_node)
- config_root_node = pydot.Node('config-root')
- graph.add_node(config_root_node)
- subnet_node = pydot.Node('subnet')
- graph.add_node(subnet_node)
- global_system_config_node = pydot.Node('global-system-config')
- graph.add_node(global_system_config_node)
- service_appliance_node = pydot.Node('service-appliance')
- graph.add_node(service_appliance_node)
- service_instance_node = pydot.Node('service-instance')
- graph.add_node(service_instance_node)
- namespace_node = pydot.Node('namespace')
- graph.add_node(namespace_node)
- logical_interface_node = pydot.Node('logical-interface')
- graph.add_node(logical_interface_node)
- route_table_node = pydot.Node('route-table')
- graph.add_node(route_table_node)
- physical_interface_node = pydot.Node('physical-interface')
- graph.add_node(physical_interface_node)
- access_control_list_node = pydot.Node('access-control-list')
- graph.add_node(access_control_list_node)
- analytics_node_node = pydot.Node('analytics-node')
- graph.add_node(analytics_node_node)
- virtual_DNS_node = pydot.Node('virtual-DNS')
- graph.add_node(virtual_DNS_node)
- customer_attachment_node = pydot.Node('customer-attachment')
- graph.add_node(customer_attachment_node)
- service_appliance_set_node = pydot.Node('service-appliance-set')
- graph.add_node(service_appliance_set_node)
- config_node_node = pydot.Node('config-node')
- graph.add_node(config_node_node)
- qos_queue_node = pydot.Node('qos-queue')
- graph.add_node(qos_queue_node)
- virtual_machine_node = pydot.Node('virtual-machine')
- graph.add_node(virtual_machine_node)
- interface_route_table_node = pydot.Node('interface-route-table')
- graph.add_node(interface_route_table_node)
- service_template_node = pydot.Node('service-template')
- graph.add_node(service_template_node)
- virtual_ip_node = pydot.Node('virtual-ip')
- graph.add_node(virtual_ip_node)
- loadbalancer_member_node = pydot.Node('loadbalancer-member')
- graph.add_node(loadbalancer_member_node)
- security_group_node = pydot.Node('security-group')
- graph.add_node(security_group_node)
- provider_attachment_node = pydot.Node('provider-attachment')
- graph.add_node(provider_attachment_node)
- virtual_machine_interface_node = pydot.Node('virtual-machine-interface')
- graph.add_node(virtual_machine_interface_node)
- loadbalancer_healthmonitor_node = pydot.Node('loadbalancer-healthmonitor')
- graph.add_node(loadbalancer_healthmonitor_node)
- virtual_network_node = pydot.Node('virtual-network')
- graph.add_node(virtual_network_node)
- project_node = pydot.Node('project')
- graph.add_node(project_node)
- qos_forwarding_class_node = pydot.Node('qos-forwarding-class')
- graph.add_node(qos_forwarding_class_node)
- database_node_node = pydot.Node('database-node')
- graph.add_node(database_node_node)
- routing_instance_node = pydot.Node('routing-instance')
- graph.add_node(routing_instance_node)
- network_ipam_node = pydot.Node('network-ipam')
- graph.add_node(network_ipam_node)
- logical_router_node = pydot.Node('logical-router')
- graph.add_node(logical_router_node)
-
- # Generate edge definitions and add to graph
- graph.add_edge(pydot.Edge(domain_node, project_node, color = 'red'))
- graph.add_edge(pydot.Edge(domain_node, namespace_node, color = 'red'))
- graph.add_edge(pydot.Edge(domain_node, service_template_node, color = 'red'))
- graph.add_edge(pydot.Edge(domain_node, virtual_DNS_node, color = 'red'))
- graph.add_edge(pydot.Edge(instance_ip_node, virtual_network_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(instance_ip_node, virtual_machine_interface_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(loadbalancer_pool_node, service_instance_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(loadbalancer_pool_node, virtual_machine_interface_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(loadbalancer_pool_node, service_appliance_set_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(loadbalancer_pool_node, loadbalancer_member_node, color = 'red'))
- graph.add_edge(pydot.Edge(loadbalancer_pool_node, loadbalancer_healthmonitor_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(floating_ip_node, project_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(floating_ip_node, virtual_machine_interface_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(floating_ip_pool_node, floating_ip_node, color = 'red'))
- graph.add_edge(pydot.Edge(physical_router_node, virtual_router_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(physical_router_node, bgp_router_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(physical_router_node, virtual_network_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(physical_router_node, physical_interface_node, color = 'red'))
- graph.add_edge(pydot.Edge(physical_router_node, logical_interface_node, color = 'red'))
- graph.add_edge(pydot.Edge(bgp_router_node, bgp_router_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(virtual_router_node, bgp_router_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(virtual_router_node, virtual_machine_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(config_root_node, global_system_config_node, color = 'red'))
- graph.add_edge(pydot.Edge(config_root_node, domain_node, color = 'red'))
- graph.add_edge(pydot.Edge(subnet_node, virtual_machine_interface_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(global_system_config_node, bgp_router_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(global_system_config_node, global_vrouter_config_node, color = 'red'))
- graph.add_edge(pydot.Edge(global_system_config_node, physical_router_node, color = 'red'))
- graph.add_edge(pydot.Edge(global_system_config_node, virtual_router_node, color = 'red'))
- graph.add_edge(pydot.Edge(global_system_config_node, config_node_node, color = 'red'))
- graph.add_edge(pydot.Edge(global_system_config_node, analytics_node_node, color = 'red'))
- graph.add_edge(pydot.Edge(global_system_config_node, database_node_node, color = 'red'))
- graph.add_edge(pydot.Edge(global_system_config_node, service_appliance_set_node, color = 'red'))
- graph.add_edge(pydot.Edge(service_instance_node, service_template_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(logical_interface_node, virtual_machine_interface_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(physical_interface_node, logical_interface_node, color = 'red'))
- graph.add_edge(pydot.Edge(virtual_DNS_node, virtual_DNS_record_node, color = 'red'))
- graph.add_edge(pydot.Edge(customer_attachment_node, virtual_machine_interface_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(customer_attachment_node, floating_ip_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(service_appliance_set_node, service_appliance_node, color = 'red'))
- graph.add_edge(pydot.Edge(virtual_machine_node, virtual_machine_interface_node, color = 'red'))
- graph.add_edge(pydot.Edge(virtual_machine_node, service_instance_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(virtual_ip_node, loadbalancer_pool_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(virtual_ip_node, virtual_machine_interface_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(security_group_node, access_control_list_node, color = 'red'))
- graph.add_edge(pydot.Edge(provider_attachment_node, virtual_router_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(virtual_machine_interface_node, qos_forwarding_class_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(virtual_machine_interface_node, security_group_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(virtual_machine_interface_node, virtual_machine_interface_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(virtual_machine_interface_node, virtual_machine_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(virtual_machine_interface_node, virtual_network_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(virtual_machine_interface_node, routing_instance_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(virtual_machine_interface_node, interface_route_table_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(virtual_network_node, qos_forwarding_class_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(virtual_network_node, network_ipam_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(virtual_network_node, network_policy_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(virtual_network_node, access_control_list_node, color = 'red'))
- graph.add_edge(pydot.Edge(virtual_network_node, floating_ip_pool_node, color = 'red'))
- graph.add_edge(pydot.Edge(virtual_network_node, routing_instance_node, color = 'red'))
- graph.add_edge(pydot.Edge(virtual_network_node, route_table_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(project_node, namespace_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(project_node, security_group_node, color = 'red'))
- graph.add_edge(pydot.Edge(project_node, virtual_network_node, color = 'red'))
- graph.add_edge(pydot.Edge(project_node, qos_queue_node, color = 'red'))
- graph.add_edge(pydot.Edge(project_node, qos_forwarding_class_node, color = 'red'))
- graph.add_edge(pydot.Edge(project_node, network_ipam_node, color = 'red'))
- graph.add_edge(pydot.Edge(project_node, network_policy_node, color = 'red'))
- graph.add_edge(pydot.Edge(project_node, virtual_machine_interface_node, color = 'red'))
- graph.add_edge(pydot.Edge(project_node, floating_ip_pool_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(project_node, service_instance_node, color = 'red'))
- graph.add_edge(pydot.Edge(project_node, route_table_node, color = 'red'))
- graph.add_edge(pydot.Edge(project_node, interface_route_table_node, color = 'red'))
- graph.add_edge(pydot.Edge(project_node, logical_router_node, color = 'red'))
- graph.add_edge(pydot.Edge(project_node, loadbalancer_pool_node, color = 'red'))
- graph.add_edge(pydot.Edge(project_node, loadbalancer_healthmonitor_node, color = 'red'))
- graph.add_edge(pydot.Edge(project_node, virtual_ip_node, color = 'red'))
- graph.add_edge(pydot.Edge(qos_forwarding_class_node, qos_queue_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(routing_instance_node, bgp_router_node, color = 'red'))
- graph.add_edge(pydot.Edge(routing_instance_node, routing_instance_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(routing_instance_node, route_target_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(network_ipam_node, virtual_DNS_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(logical_router_node, virtual_machine_interface_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(logical_router_node, route_target_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(logical_router_node, virtual_network_node, color = 'blue', constraint = False))
- graph.add_edge(pydot.Edge(logical_router_node, service_instance_node, color = 'blue', constraint = False))
-
- return graph
-#end generate_schema_graph
-
-def write_schema_graph(graph, filename):
- graph.write_xdot(filename)
-#end write_schema_graph
-
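Editor's note: the deleted generate_schema_graph/write_schema_graph helpers above build a pydot digraph and dump it in Graphviz xdot format; the red edges appear to mark parent/child containment while the blue, constraint=False edges appear to mark reference links kept out of the layout ranking. The following is a minimal, self-contained sketch of that same pattern, assuming only that the pydot package and a local Graphviz install are available; the node names and the red/blue interpretation are illustrative, not taken from the generator.

# Illustrative sketch only -- not part of the deleted file.
# Assumes `pip install pydot` and Graphviz on PATH (needed for write_xdot).
import pydot

def generate_schema_graph():
    graph = pydot.Dot('schema', graph_type='digraph')

    # Hypothetical nodes standing in for the generated *_node objects.
    project_node = pydot.Node('project')
    virtual_machine_interface_node = pydot.Node('virtual_machine_interface')
    virtual_network_node = pydot.Node('virtual_network')
    for node in (project_node, virtual_machine_interface_node, virtual_network_node):
        graph.add_node(node)

    # Red edge, as in the deleted helper: project -> virtual-network (containment).
    graph.add_edge(pydot.Edge(project_node, virtual_network_node, color='red'))
    # Blue constraint=False edge, as in the deleted helper: VMI -> virtual-network (reference).
    graph.add_edge(pydot.Edge(virtual_machine_interface_node, virtual_network_node,
                              color='blue', constraint=False))
    return graph

def write_schema_graph(graph, filename):
    # xdot is Graphviz's annotated-layout output format, as used above.
    graph.write_xdot(filename)

if __name__ == '__main__':
    write_schema_graph(generate_schema_graph(), 'schema.xdot')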
diff --git a/Testcases/vnc_api/gen/vnc_api_schema.pyc b/Testcases/vnc_api/gen/vnc_api_schema.pyc
deleted file mode 100644
index ce0c611..0000000
--- a/Testcases/vnc_api/gen/vnc_api_schema.pyc
+++ /dev/null
Binary files differ
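Editor's note: the deleted vnc_api_server_gen.py below wires, for every resource type, three leaf routes (GET/PUT/DELETE on /<type>/<id>) and two collection routes (GET/POST on /<type>s) onto the server object inside __new__. The sketch below shows only that registration pattern on a plain bottle.Bottle app, under the assumption that Bottle's route(path, method, callback) call form is used exactly as in the generated code; the handler bodies and port are placeholders, not the generated logic.

# Illustrative sketch only -- not part of the deleted file.
# Mirrors the per-resource route wiring of the generated server on a plain Bottle app.
from bottle import Bottle

app = Bottle()

def register_resource(app, res_type, get_cb, put_cb, delete_cb, list_cb, create_cb):
    """Wire leaf and collection routes for one resource type."""
    plural = res_type + 's'   # the generator pluralizes naively ('network-policys', ...)
    # leaf resource
    app.route('/%s/<id>' % res_type, 'GET', get_cb)
    app.route('/%s/<id>' % res_type, 'PUT', put_cb)
    app.route('/%s/<id>' % res_type, 'DELETE', delete_cb)
    # collections
    app.route('/%s' % plural, 'POST', create_cb)
    app.route('/%s' % plural, 'GET', list_cb)

# Placeholder handlers; the generated code dispatches to domain_http_get etc. instead.
def domain_get(id):    return {'domain': {'uuid': id}}
def domain_put(id):    return {'domain': {'uuid': id}}
def domain_delete(id): return None
def domains_list():    return {'domains': []}
def domains_create():  return {'domain': {}}

register_resource(app, 'domain', domain_get, domain_put, domain_delete,
                  domains_list, domains_create)

if __name__ == '__main__':
    app.run(host='localhost', port=8082)   # port choice is illustrative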
diff --git a/Testcases/vnc_api/gen/vnc_api_server_gen.py b/Testcases/vnc_api/gen/vnc_api_server_gen.py
deleted file mode 100644
index ac75e72..0000000
--- a/Testcases/vnc_api/gen/vnc_api_server_gen.py
+++ /dev/null
@@ -1,26142 +0,0 @@
-
-# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit!
-
-from bottle import abort, request, response
-
-import gevent
-import json
-import uuid
-from pprint import pformat
-
-import cfgm_common
-from cfgm_common.rest import LinkObject, hdr_server_tenant
-from cfgm_common.exceptions import *
-from resource_xsd import *
-from resource_common import *
-from resource_server import *
-import cStringIO
-from lxml import etree
-
-all_resource_types = set([
- 'domain',
- 'global-vrouter-config',
- 'instance-ip',
- 'network-policy',
- 'loadbalancer-pool',
- 'virtual-DNS-record',
- 'route-target',
- 'floating-ip',
- 'floating-ip-pool',
- 'physical-router',
- 'bgp-router',
- 'virtual-router',
- 'config-root',
- 'subnet',
- 'global-system-config',
- 'service-appliance',
- 'service-instance',
- 'namespace',
- 'logical-interface',
- 'route-table',
- 'physical-interface',
- 'access-control-list',
- 'analytics-node',
- 'virtual-DNS',
- 'customer-attachment',
- 'service-appliance-set',
- 'config-node',
- 'qos-queue',
- 'virtual-machine',
- 'interface-route-table',
- 'service-template',
- 'virtual-ip',
- 'loadbalancer-member',
- 'security-group',
- 'provider-attachment',
- 'virtual-machine-interface',
- 'loadbalancer-healthmonitor',
- 'virtual-network',
- 'project',
- 'qos-forwarding-class',
- 'database-node',
- 'routing-instance',
- 'network-ipam',
- 'logical-router',
- ])
-class VncApiServerGen(object):
- def __new__(cls, *args, **kwargs):
- obj = super(VncApiServerGen, cls).__new__(cls, *args, **kwargs)
- # leaf resource
- obj.route('/domain/<id>', 'GET', obj.domain_http_get)
- obj.route('/domain/<id>', 'PUT', obj.domain_http_put)
- obj.route('/domain/<id>', 'DELETE', obj.domain_http_delete)
- # collections
- obj.route('/domains', 'POST', obj.domains_http_post)
- obj.route('/domains', 'GET', obj.domains_http_get)
- # leaf resource
- obj.route('/global-vrouter-config/<id>', 'GET', obj.global_vrouter_config_http_get)
- obj.route('/global-vrouter-config/<id>', 'PUT', obj.global_vrouter_config_http_put)
- obj.route('/global-vrouter-config/<id>', 'DELETE', obj.global_vrouter_config_http_delete)
- # collections
- obj.route('/global-vrouter-configs', 'POST', obj.global_vrouter_configs_http_post)
- obj.route('/global-vrouter-configs', 'GET', obj.global_vrouter_configs_http_get)
- # leaf resource
- obj.route('/instance-ip/<id>', 'GET', obj.instance_ip_http_get)
- obj.route('/instance-ip/<id>', 'PUT', obj.instance_ip_http_put)
- obj.route('/instance-ip/<id>', 'DELETE', obj.instance_ip_http_delete)
- # collections
- obj.route('/instance-ips', 'POST', obj.instance_ips_http_post)
- obj.route('/instance-ips', 'GET', obj.instance_ips_http_get)
- # leaf resource
- obj.route('/network-policy/<id>', 'GET', obj.network_policy_http_get)
- obj.route('/network-policy/<id>', 'PUT', obj.network_policy_http_put)
- obj.route('/network-policy/<id>', 'DELETE', obj.network_policy_http_delete)
- # collections
- obj.route('/network-policys', 'POST', obj.network_policys_http_post)
- obj.route('/network-policys', 'GET', obj.network_policys_http_get)
- # leaf resource
- obj.route('/loadbalancer-pool/<id>', 'GET', obj.loadbalancer_pool_http_get)
- obj.route('/loadbalancer-pool/<id>', 'PUT', obj.loadbalancer_pool_http_put)
- obj.route('/loadbalancer-pool/<id>', 'DELETE', obj.loadbalancer_pool_http_delete)
- # collections
- obj.route('/loadbalancer-pools', 'POST', obj.loadbalancer_pools_http_post)
- obj.route('/loadbalancer-pools', 'GET', obj.loadbalancer_pools_http_get)
- # leaf resource
- obj.route('/virtual-DNS-record/<id>', 'GET', obj.virtual_DNS_record_http_get)
- obj.route('/virtual-DNS-record/<id>', 'PUT', obj.virtual_DNS_record_http_put)
- obj.route('/virtual-DNS-record/<id>', 'DELETE', obj.virtual_DNS_record_http_delete)
- # collections
- obj.route('/virtual-DNS-records', 'POST', obj.virtual_DNS_records_http_post)
- obj.route('/virtual-DNS-records', 'GET', obj.virtual_DNS_records_http_get)
- # leaf resource
- obj.route('/route-target/<id>', 'GET', obj.route_target_http_get)
- obj.route('/route-target/<id>', 'PUT', obj.route_target_http_put)
- obj.route('/route-target/<id>', 'DELETE', obj.route_target_http_delete)
- # collections
- obj.route('/route-targets', 'POST', obj.route_targets_http_post)
- obj.route('/route-targets', 'GET', obj.route_targets_http_get)
- # leaf resource
- obj.route('/floating-ip/<id>', 'GET', obj.floating_ip_http_get)
- obj.route('/floating-ip/<id>', 'PUT', obj.floating_ip_http_put)
- obj.route('/floating-ip/<id>', 'DELETE', obj.floating_ip_http_delete)
- # collections
- obj.route('/floating-ips', 'POST', obj.floating_ips_http_post)
- obj.route('/floating-ips', 'GET', obj.floating_ips_http_get)
- # leaf resource
- obj.route('/floating-ip-pool/<id>', 'GET', obj.floating_ip_pool_http_get)
- obj.route('/floating-ip-pool/<id>', 'PUT', obj.floating_ip_pool_http_put)
- obj.route('/floating-ip-pool/<id>', 'DELETE', obj.floating_ip_pool_http_delete)
- # collections
- obj.route('/floating-ip-pools', 'POST', obj.floating_ip_pools_http_post)
- obj.route('/floating-ip-pools', 'GET', obj.floating_ip_pools_http_get)
- # leaf resource
- obj.route('/physical-router/<id>', 'GET', obj.physical_router_http_get)
- obj.route('/physical-router/<id>', 'PUT', obj.physical_router_http_put)
- obj.route('/physical-router/<id>', 'DELETE', obj.physical_router_http_delete)
- # collections
- obj.route('/physical-routers', 'POST', obj.physical_routers_http_post)
- obj.route('/physical-routers', 'GET', obj.physical_routers_http_get)
- # leaf resource
- obj.route('/bgp-router/<id>', 'GET', obj.bgp_router_http_get)
- obj.route('/bgp-router/<id>', 'PUT', obj.bgp_router_http_put)
- obj.route('/bgp-router/<id>', 'DELETE', obj.bgp_router_http_delete)
- # collections
- obj.route('/bgp-routers', 'POST', obj.bgp_routers_http_post)
- obj.route('/bgp-routers', 'GET', obj.bgp_routers_http_get)
- # leaf resource
- obj.route('/virtual-router/<id>', 'GET', obj.virtual_router_http_get)
- obj.route('/virtual-router/<id>', 'PUT', obj.virtual_router_http_put)
- obj.route('/virtual-router/<id>', 'DELETE', obj.virtual_router_http_delete)
- # collections
- obj.route('/virtual-routers', 'POST', obj.virtual_routers_http_post)
- obj.route('/virtual-routers', 'GET', obj.virtual_routers_http_get)
- # leaf resource
- obj.route('/subnet/<id>', 'GET', obj.subnet_http_get)
- obj.route('/subnet/<id>', 'PUT', obj.subnet_http_put)
- obj.route('/subnet/<id>', 'DELETE', obj.subnet_http_delete)
- # collections
- obj.route('/subnets', 'POST', obj.subnets_http_post)
- obj.route('/subnets', 'GET', obj.subnets_http_get)
- # leaf resource
- obj.route('/global-system-config/<id>', 'GET', obj.global_system_config_http_get)
- obj.route('/global-system-config/<id>', 'PUT', obj.global_system_config_http_put)
- obj.route('/global-system-config/<id>', 'DELETE', obj.global_system_config_http_delete)
- # collections
- obj.route('/global-system-configs', 'POST', obj.global_system_configs_http_post)
- obj.route('/global-system-configs', 'GET', obj.global_system_configs_http_get)
- # leaf resource
- obj.route('/service-appliance/<id>', 'GET', obj.service_appliance_http_get)
- obj.route('/service-appliance/<id>', 'PUT', obj.service_appliance_http_put)
- obj.route('/service-appliance/<id>', 'DELETE', obj.service_appliance_http_delete)
- # collections
- obj.route('/service-appliances', 'POST', obj.service_appliances_http_post)
- obj.route('/service-appliances', 'GET', obj.service_appliances_http_get)
- # leaf resource
- obj.route('/service-instance/<id>', 'GET', obj.service_instance_http_get)
- obj.route('/service-instance/<id>', 'PUT', obj.service_instance_http_put)
- obj.route('/service-instance/<id>', 'DELETE', obj.service_instance_http_delete)
- # collections
- obj.route('/service-instances', 'POST', obj.service_instances_http_post)
- obj.route('/service-instances', 'GET', obj.service_instances_http_get)
- # leaf resource
- obj.route('/namespace/<id>', 'GET', obj.namespace_http_get)
- obj.route('/namespace/<id>', 'PUT', obj.namespace_http_put)
- obj.route('/namespace/<id>', 'DELETE', obj.namespace_http_delete)
- # collections
- obj.route('/namespaces', 'POST', obj.namespaces_http_post)
- obj.route('/namespaces', 'GET', obj.namespaces_http_get)
- # leaf resource
- obj.route('/logical-interface/<id>', 'GET', obj.logical_interface_http_get)
- obj.route('/logical-interface/<id>', 'PUT', obj.logical_interface_http_put)
- obj.route('/logical-interface/<id>', 'DELETE', obj.logical_interface_http_delete)
- # collections
- obj.route('/logical-interfaces', 'POST', obj.logical_interfaces_http_post)
- obj.route('/logical-interfaces', 'GET', obj.logical_interfaces_http_get)
- # leaf resource
- obj.route('/route-table/<id>', 'GET', obj.route_table_http_get)
- obj.route('/route-table/<id>', 'PUT', obj.route_table_http_put)
- obj.route('/route-table/<id>', 'DELETE', obj.route_table_http_delete)
- # collections
- obj.route('/route-tables', 'POST', obj.route_tables_http_post)
- obj.route('/route-tables', 'GET', obj.route_tables_http_get)
- # leaf resource
- obj.route('/physical-interface/<id>', 'GET', obj.physical_interface_http_get)
- obj.route('/physical-interface/<id>', 'PUT', obj.physical_interface_http_put)
- obj.route('/physical-interface/<id>', 'DELETE', obj.physical_interface_http_delete)
- # collections
- obj.route('/physical-interfaces', 'POST', obj.physical_interfaces_http_post)
- obj.route('/physical-interfaces', 'GET', obj.physical_interfaces_http_get)
- # leaf resource
- obj.route('/access-control-list/<id>', 'GET', obj.access_control_list_http_get)
- obj.route('/access-control-list/<id>', 'PUT', obj.access_control_list_http_put)
- obj.route('/access-control-list/<id>', 'DELETE', obj.access_control_list_http_delete)
- # collections
- obj.route('/access-control-lists', 'POST', obj.access_control_lists_http_post)
- obj.route('/access-control-lists', 'GET', obj.access_control_lists_http_get)
- # leaf resource
- obj.route('/analytics-node/<id>', 'GET', obj.analytics_node_http_get)
- obj.route('/analytics-node/<id>', 'PUT', obj.analytics_node_http_put)
- obj.route('/analytics-node/<id>', 'DELETE', obj.analytics_node_http_delete)
- # collections
- obj.route('/analytics-nodes', 'POST', obj.analytics_nodes_http_post)
- obj.route('/analytics-nodes', 'GET', obj.analytics_nodes_http_get)
- # leaf resource
- obj.route('/virtual-DNS/<id>', 'GET', obj.virtual_DNS_http_get)
- obj.route('/virtual-DNS/<id>', 'PUT', obj.virtual_DNS_http_put)
- obj.route('/virtual-DNS/<id>', 'DELETE', obj.virtual_DNS_http_delete)
- # collections
- obj.route('/virtual-DNSs', 'POST', obj.virtual_DNSs_http_post)
- obj.route('/virtual-DNSs', 'GET', obj.virtual_DNSs_http_get)
- # leaf resource
- obj.route('/customer-attachment/<id>', 'GET', obj.customer_attachment_http_get)
- obj.route('/customer-attachment/<id>', 'PUT', obj.customer_attachment_http_put)
- obj.route('/customer-attachment/<id>', 'DELETE', obj.customer_attachment_http_delete)
- # collections
- obj.route('/customer-attachments', 'POST', obj.customer_attachments_http_post)
- obj.route('/customer-attachments', 'GET', obj.customer_attachments_http_get)
- # leaf resource
- obj.route('/service-appliance-set/<id>', 'GET', obj.service_appliance_set_http_get)
- obj.route('/service-appliance-set/<id>', 'PUT', obj.service_appliance_set_http_put)
- obj.route('/service-appliance-set/<id>', 'DELETE', obj.service_appliance_set_http_delete)
- # collections
- obj.route('/service-appliance-sets', 'POST', obj.service_appliance_sets_http_post)
- obj.route('/service-appliance-sets', 'GET', obj.service_appliance_sets_http_get)
- # leaf resource
- obj.route('/config-node/<id>', 'GET', obj.config_node_http_get)
- obj.route('/config-node/<id>', 'PUT', obj.config_node_http_put)
- obj.route('/config-node/<id>', 'DELETE', obj.config_node_http_delete)
- # collections
- obj.route('/config-nodes', 'POST', obj.config_nodes_http_post)
- obj.route('/config-nodes', 'GET', obj.config_nodes_http_get)
- # leaf resource
- obj.route('/qos-queue/<id>', 'GET', obj.qos_queue_http_get)
- obj.route('/qos-queue/<id>', 'PUT', obj.qos_queue_http_put)
- obj.route('/qos-queue/<id>', 'DELETE', obj.qos_queue_http_delete)
- # collections
- obj.route('/qos-queues', 'POST', obj.qos_queues_http_post)
- obj.route('/qos-queues', 'GET', obj.qos_queues_http_get)
- # leaf resource
- obj.route('/virtual-machine/<id>', 'GET', obj.virtual_machine_http_get)
- obj.route('/virtual-machine/<id>', 'PUT', obj.virtual_machine_http_put)
- obj.route('/virtual-machine/<id>', 'DELETE', obj.virtual_machine_http_delete)
- # collections
- obj.route('/virtual-machines', 'POST', obj.virtual_machines_http_post)
- obj.route('/virtual-machines', 'GET', obj.virtual_machines_http_get)
- # leaf resource
- obj.route('/interface-route-table/<id>', 'GET', obj.interface_route_table_http_get)
- obj.route('/interface-route-table/<id>', 'PUT', obj.interface_route_table_http_put)
- obj.route('/interface-route-table/<id>', 'DELETE', obj.interface_route_table_http_delete)
- # collections
- obj.route('/interface-route-tables', 'POST', obj.interface_route_tables_http_post)
- obj.route('/interface-route-tables', 'GET', obj.interface_route_tables_http_get)
- # leaf resource
- obj.route('/service-template/<id>', 'GET', obj.service_template_http_get)
- obj.route('/service-template/<id>', 'PUT', obj.service_template_http_put)
- obj.route('/service-template/<id>', 'DELETE', obj.service_template_http_delete)
- # collections
- obj.route('/service-templates', 'POST', obj.service_templates_http_post)
- obj.route('/service-templates', 'GET', obj.service_templates_http_get)
- # leaf resource
- obj.route('/virtual-ip/<id>', 'GET', obj.virtual_ip_http_get)
- obj.route('/virtual-ip/<id>', 'PUT', obj.virtual_ip_http_put)
- obj.route('/virtual-ip/<id>', 'DELETE', obj.virtual_ip_http_delete)
- # collections
- obj.route('/virtual-ips', 'POST', obj.virtual_ips_http_post)
- obj.route('/virtual-ips', 'GET', obj.virtual_ips_http_get)
- # leaf resource
- obj.route('/loadbalancer-member/<id>', 'GET', obj.loadbalancer_member_http_get)
- obj.route('/loadbalancer-member/<id>', 'PUT', obj.loadbalancer_member_http_put)
- obj.route('/loadbalancer-member/<id>', 'DELETE', obj.loadbalancer_member_http_delete)
- # collections
- obj.route('/loadbalancer-members', 'POST', obj.loadbalancer_members_http_post)
- obj.route('/loadbalancer-members', 'GET', obj.loadbalancer_members_http_get)
- # leaf resource
- obj.route('/security-group/<id>', 'GET', obj.security_group_http_get)
- obj.route('/security-group/<id>', 'PUT', obj.security_group_http_put)
- obj.route('/security-group/<id>', 'DELETE', obj.security_group_http_delete)
- # collections
- obj.route('/security-groups', 'POST', obj.security_groups_http_post)
- obj.route('/security-groups', 'GET', obj.security_groups_http_get)
- # leaf resource
- obj.route('/provider-attachment/<id>', 'GET', obj.provider_attachment_http_get)
- obj.route('/provider-attachment/<id>', 'PUT', obj.provider_attachment_http_put)
- obj.route('/provider-attachment/<id>', 'DELETE', obj.provider_attachment_http_delete)
- # collections
- obj.route('/provider-attachments', 'POST', obj.provider_attachments_http_post)
- obj.route('/provider-attachments', 'GET', obj.provider_attachments_http_get)
- # leaf resource
- obj.route('/virtual-machine-interface/<id>', 'GET', obj.virtual_machine_interface_http_get)
- obj.route('/virtual-machine-interface/<id>', 'PUT', obj.virtual_machine_interface_http_put)
- obj.route('/virtual-machine-interface/<id>', 'DELETE', obj.virtual_machine_interface_http_delete)
- # collections
- obj.route('/virtual-machine-interfaces', 'POST', obj.virtual_machine_interfaces_http_post)
- obj.route('/virtual-machine-interfaces', 'GET', obj.virtual_machine_interfaces_http_get)
- # leaf resource
- obj.route('/loadbalancer-healthmonitor/<id>', 'GET', obj.loadbalancer_healthmonitor_http_get)
- obj.route('/loadbalancer-healthmonitor/<id>', 'PUT', obj.loadbalancer_healthmonitor_http_put)
- obj.route('/loadbalancer-healthmonitor/<id>', 'DELETE', obj.loadbalancer_healthmonitor_http_delete)
- # collections
- obj.route('/loadbalancer-healthmonitors', 'POST', obj.loadbalancer_healthmonitors_http_post)
- obj.route('/loadbalancer-healthmonitors', 'GET', obj.loadbalancer_healthmonitors_http_get)
- # leaf resource
- obj.route('/virtual-network/<id>', 'GET', obj.virtual_network_http_get)
- obj.route('/virtual-network/<id>', 'PUT', obj.virtual_network_http_put)
- obj.route('/virtual-network/<id>', 'DELETE', obj.virtual_network_http_delete)
- # collections
- obj.route('/virtual-networks', 'POST', obj.virtual_networks_http_post)
- obj.route('/virtual-networks', 'GET', obj.virtual_networks_http_get)
- # leaf resource
- obj.route('/project/<id>', 'GET', obj.project_http_get)
- obj.route('/project/<id>', 'PUT', obj.project_http_put)
- obj.route('/project/<id>', 'DELETE', obj.project_http_delete)
- # collections
- obj.route('/projects', 'POST', obj.projects_http_post)
- obj.route('/projects', 'GET', obj.projects_http_get)
- # leaf resource
- obj.route('/qos-forwarding-class/<id>', 'GET', obj.qos_forwarding_class_http_get)
- obj.route('/qos-forwarding-class/<id>', 'PUT', obj.qos_forwarding_class_http_put)
- obj.route('/qos-forwarding-class/<id>', 'DELETE', obj.qos_forwarding_class_http_delete)
- # collections
- obj.route('/qos-forwarding-classs', 'POST', obj.qos_forwarding_classs_http_post)
- obj.route('/qos-forwarding-classs', 'GET', obj.qos_forwarding_classs_http_get)
- # leaf resource
- obj.route('/database-node/<id>', 'GET', obj.database_node_http_get)
- obj.route('/database-node/<id>', 'PUT', obj.database_node_http_put)
- obj.route('/database-node/<id>', 'DELETE', obj.database_node_http_delete)
- # collections
- obj.route('/database-nodes', 'POST', obj.database_nodes_http_post)
- obj.route('/database-nodes', 'GET', obj.database_nodes_http_get)
- # leaf resource
- obj.route('/routing-instance/<id>', 'GET', obj.routing_instance_http_get)
- obj.route('/routing-instance/<id>', 'PUT', obj.routing_instance_http_put)
- obj.route('/routing-instance/<id>', 'DELETE', obj.routing_instance_http_delete)
- # collections
- obj.route('/routing-instances', 'POST', obj.routing_instances_http_post)
- obj.route('/routing-instances', 'GET', obj.routing_instances_http_get)
- # leaf resource
- obj.route('/network-ipam/<id>', 'GET', obj.network_ipam_http_get)
- obj.route('/network-ipam/<id>', 'PUT', obj.network_ipam_http_put)
- obj.route('/network-ipam/<id>', 'DELETE', obj.network_ipam_http_delete)
- # collections
- obj.route('/network-ipams', 'POST', obj.network_ipams_http_post)
- obj.route('/network-ipams', 'GET', obj.network_ipams_http_get)
- # leaf resource
- obj.route('/logical-router/<id>', 'GET', obj.logical_router_http_get)
- obj.route('/logical-router/<id>', 'PUT', obj.logical_router_http_put)
- obj.route('/logical-router/<id>', 'DELETE', obj.logical_router_http_delete)
- # collections
- obj.route('/logical-routers', 'POST', obj.logical_routers_http_post)
- obj.route('/logical-routers', 'GET', obj.logical_routers_http_get)
- return obj
- #end __new__
-
- def __init__(self):
- self._db_conn = None
- self._get_common = None
- self._post_common = None
-
- self._resource_classes = {}
- self._resource_classes['domain'] = DomainServerGen
-
- self._resource_classes['global-vrouter-config'] = GlobalVrouterConfigServerGen
-
- self._resource_classes['instance-ip'] = InstanceIpServerGen
-
- self._resource_classes['network-policy'] = NetworkPolicyServerGen
-
- self._resource_classes['loadbalancer-pool'] = LoadbalancerPoolServerGen
-
- self._resource_classes['virtual-DNS-record'] = VirtualDnsRecordServerGen
-
- self._resource_classes['route-target'] = RouteTargetServerGen
-
- self._resource_classes['floating-ip'] = FloatingIpServerGen
-
- self._resource_classes['floating-ip-pool'] = FloatingIpPoolServerGen
-
- self._resource_classes['physical-router'] = PhysicalRouterServerGen
-
- self._resource_classes['bgp-router'] = BgpRouterServerGen
-
- self._resource_classes['virtual-router'] = VirtualRouterServerGen
-
- self._resource_classes['config-root'] = ConfigRootServerGen
-
- self._resource_classes['subnet'] = SubnetServerGen
-
- self._resource_classes['global-system-config'] = GlobalSystemConfigServerGen
-
- self._resource_classes['service-appliance'] = ServiceApplianceServerGen
-
- self._resource_classes['service-instance'] = ServiceInstanceServerGen
-
- self._resource_classes['namespace'] = NamespaceServerGen
-
- self._resource_classes['logical-interface'] = LogicalInterfaceServerGen
-
- self._resource_classes['route-table'] = RouteTableServerGen
-
- self._resource_classes['physical-interface'] = PhysicalInterfaceServerGen
-
- self._resource_classes['access-control-list'] = AccessControlListServerGen
-
- self._resource_classes['analytics-node'] = AnalyticsNodeServerGen
-
- self._resource_classes['virtual-DNS'] = VirtualDnsServerGen
-
- self._resource_classes['customer-attachment'] = CustomerAttachmentServerGen
-
- self._resource_classes['service-appliance-set'] = ServiceApplianceSetServerGen
-
- self._resource_classes['config-node'] = ConfigNodeServerGen
-
- self._resource_classes['qos-queue'] = QosQueueServerGen
-
- self._resource_classes['virtual-machine'] = VirtualMachineServerGen
-
- self._resource_classes['interface-route-table'] = InterfaceRouteTableServerGen
-
- self._resource_classes['service-template'] = ServiceTemplateServerGen
-
- self._resource_classes['virtual-ip'] = VirtualIpServerGen
-
- self._resource_classes['loadbalancer-member'] = LoadbalancerMemberServerGen
-
- self._resource_classes['security-group'] = SecurityGroupServerGen
-
- self._resource_classes['provider-attachment'] = ProviderAttachmentServerGen
-
- self._resource_classes['virtual-machine-interface'] = VirtualMachineInterfaceServerGen
-
- self._resource_classes['loadbalancer-healthmonitor'] = LoadbalancerHealthmonitorServerGen
-
- self._resource_classes['virtual-network'] = VirtualNetworkServerGen
-
- self._resource_classes['project'] = ProjectServerGen
-
- self._resource_classes['qos-forwarding-class'] = QosForwardingClassServerGen
-
- self._resource_classes['database-node'] = DatabaseNodeServerGen
-
- self._resource_classes['routing-instance'] = RoutingInstanceServerGen
-
- self._resource_classes['network-ipam'] = NetworkIpamServerGen
-
- self._resource_classes['logical-router'] = LogicalRouterServerGen
-
-
- # Generate LinkObjects for all entities
- links = []
- # Link for root
- links.append(LinkObject('root', self._base_url , '/config-root',
- 'config-root'))
-
- # Link for collections
- link = LinkObject('collection',
- self._base_url , '/domains',
- 'domain')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/global-vrouter-configs',
- 'global-vrouter-config')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/instance-ips',
- 'instance-ip')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/network-policys',
- 'network-policy')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/loadbalancer-pools',
- 'loadbalancer-pool')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/virtual-DNS-records',
- 'virtual-DNS-record')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/route-targets',
- 'route-target')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/floating-ips',
- 'floating-ip')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/floating-ip-pools',
- 'floating-ip-pool')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/physical-routers',
- 'physical-router')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/bgp-routers',
- 'bgp-router')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/virtual-routers',
- 'virtual-router')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/subnets',
- 'subnet')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/global-system-configs',
- 'global-system-config')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/service-appliances',
- 'service-appliance')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/service-instances',
- 'service-instance')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/namespaces',
- 'namespace')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/logical-interfaces',
- 'logical-interface')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/route-tables',
- 'route-table')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/physical-interfaces',
- 'physical-interface')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/access-control-lists',
- 'access-control-list')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/analytics-nodes',
- 'analytics-node')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/virtual-DNSs',
- 'virtual-DNS')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/customer-attachments',
- 'customer-attachment')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/service-appliance-sets',
- 'service-appliance-set')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/config-nodes',
- 'config-node')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/qos-queues',
- 'qos-queue')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/virtual-machines',
- 'virtual-machine')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/interface-route-tables',
- 'interface-route-table')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/service-templates',
- 'service-template')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/virtual-ips',
- 'virtual-ip')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/loadbalancer-members',
- 'loadbalancer-member')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/security-groups',
- 'security-group')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/provider-attachments',
- 'provider-attachment')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/virtual-machine-interfaces',
- 'virtual-machine-interface')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/loadbalancer-healthmonitors',
- 'loadbalancer-healthmonitor')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/virtual-networks',
- 'virtual-network')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/projects',
- 'project')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/qos-forwarding-classs',
- 'qos-forwarding-class')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/database-nodes',
- 'database-node')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/routing-instances',
- 'routing-instance')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/network-ipams',
- 'network-ipam')
- links.append(link)
-
- link = LinkObject('collection',
- self._base_url , '/logical-routers',
- 'logical-router')
- links.append(link)
-
- # Link for Resources Base
- link = LinkObject('resource-base',
- self._base_url , '/domain',
- 'domain')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/global-vrouter-config',
- 'global-vrouter-config')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/instance-ip',
- 'instance-ip')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/network-policy',
- 'network-policy')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/loadbalancer-pool',
- 'loadbalancer-pool')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/virtual-DNS-record',
- 'virtual-DNS-record')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/route-target',
- 'route-target')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/floating-ip',
- 'floating-ip')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/floating-ip-pool',
- 'floating-ip-pool')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/physical-router',
- 'physical-router')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/bgp-router',
- 'bgp-router')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/virtual-router',
- 'virtual-router')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/config-root',
- 'config-root')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/subnet',
- 'subnet')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/global-system-config',
- 'global-system-config')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/service-appliance',
- 'service-appliance')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/service-instance',
- 'service-instance')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/namespace',
- 'namespace')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/logical-interface',
- 'logical-interface')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/route-table',
- 'route-table')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/physical-interface',
- 'physical-interface')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/access-control-list',
- 'access-control-list')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/analytics-node',
- 'analytics-node')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/virtual-DNS',
- 'virtual-DNS')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/customer-attachment',
- 'customer-attachment')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/service-appliance-set',
- 'service-appliance-set')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/config-node',
- 'config-node')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/qos-queue',
- 'qos-queue')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/virtual-machine',
- 'virtual-machine')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/interface-route-table',
- 'interface-route-table')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/service-template',
- 'service-template')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/virtual-ip',
- 'virtual-ip')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/loadbalancer-member',
- 'loadbalancer-member')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/security-group',
- 'security-group')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/provider-attachment',
- 'provider-attachment')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/virtual-machine-interface',
- 'virtual-machine-interface')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/loadbalancer-healthmonitor',
- 'loadbalancer-healthmonitor')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/virtual-network',
- 'virtual-network')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/project',
- 'project')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/qos-forwarding-class',
- 'qos-forwarding-class')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/database-node',
- 'database-node')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/routing-instance',
- 'routing-instance')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/network-ipam',
- 'network-ipam')
- links.append(link)
- link = LinkObject('resource-base',
- self._base_url , '/logical-router',
- 'logical-router')
- links.append(link)
- self._homepage_links = links
- #end __init__
-
- def is_admin_request(self):
- env = request.headers.environ
- for field in ('HTTP_X_API_ROLE', 'HTTP_X_ROLE'):
- if field in env:
- roles = env[field].split(',')
- return 'admin' in [x.lower() for x in roles]
- return False
-
- def domain_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_domain_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'domain':
- abort(404, 'No domain object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'domain', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('domain')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'domain', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'domain_limits', u'api_access_list', u'id_perms', u'display_name']
- references = []
- back_references = [u'config_root_back_refs']
- children = [u'projects', u'namespaces', 'service_templates', u'virtual_DNSs']
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('domain', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'domain', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'domain', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('domain', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_domain_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'domain': rsp_body}
- #end domain_http_get
-
- def domain_http_put(self, id):
- key = 'domain'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_domain_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'domain':
- abort(404, 'No domain object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('domain_limits')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_domain_limits = DomainLimitsType(**prop_dict)
- xx_domain_limits.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_domain_limits = DomainLimitsType()
- try:
- xx_domain_limits.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('api_access_list')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_api_access_list = ApiAccessListType(**prop_dict)
- xx_api_access_list.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_api_access_list = ApiAccessListType()
- try:
- xx_api_access_list.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'domain', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'domain', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = [u'project', u'namespace', 'service_template', u'virtual_DNS']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('domain')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'domain', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('domain', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'domain', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('domain', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_domain_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'domain': rsp_body}
- #end domain_http_put
-
- def domain_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'domain':
- abort(404, 'No domain object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_domain_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'config_root_back_refs']
- children = [u'projects', u'namespaces', 'service_templates', u'virtual_DNSs']
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('domain', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'domain', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'domain', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'domain', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('domain', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('domain')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- projects = read_result.get('projects', None)
- if projects:
- has_infos = read_result['projects']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-project')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'domain', 'http_delete', err_msg)
- abort(409, err_msg)
-
- namespaces = read_result.get('namespaces', None)
- if namespaces:
- has_infos = read_result['namespaces']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-namespace')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'domain', 'http_delete', err_msg)
- abort(409, err_msg)
-
- service_templates = read_result.get('service_templates', None)
- if service_templates:
- has_infos = read_result['service_templates']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-service-template')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'domain', 'http_delete', err_msg)
- abort(409, err_msg)
-
- virtual_DNSs = read_result.get('virtual_DNSs', None)
- if virtual_DNSs:
- has_infos = read_result['virtual_DNSs']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-virtual-DNS')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'domain', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._domain_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'domain', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('domain', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'domain', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_domain_delete', id, read_result)
- except Exception as e:
- pass
-
- #end domain_http_delete
-
- def domains_http_post(self):
- key = 'domain'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_domain_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('domain_limits')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_domain_limits = DomainLimitsType(**prop_dict)
- xx_domain_limits.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_domain_limits = DomainLimitsType()
- try:
- xx_domain_limits.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('api_access_list')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_api_access_list = ApiAccessListType(**prop_dict)
- xx_api_access_list.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_api_access_list = ApiAccessListType()
- try:
- xx_api_access_list.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'domain', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'domain', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'domain', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = [u'project', u'namespace', 'service_template', u'virtual_DNS']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('domain', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'domain', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['domain', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('domain')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'domain', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('domain', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'domain', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('domain', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_domain_create', obj_dict)
- except Exception as e:
- pass
-
- return {'domain': rsp_body}
- #end domains_http_post
-
- def domains_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'domains', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('domain', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'domains', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'domains': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('domain', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('domain', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('domain', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'domain_limits', u'api_access_list', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('domain', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('domain', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'domain': obj_dict})
-
- return {'domains': obj_dicts}
- #end domains_http_get
-
- def _domain_create_default_children(self, parent_obj):
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('project')
- if r_class and r_class.generate_default_instance:
- child_obj = Project(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('project')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('project', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('project', obj_ids, child_dict)
- self._project_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('namespace')
- if r_class and r_class.generate_default_instance:
- child_obj = Namespace(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('namespace')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('namespace', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('namespace', obj_ids, child_dict)
- self._namespace_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('service-template')
- if r_class and r_class.generate_default_instance:
- child_obj = ServiceTemplate(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('service-template')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('service-template', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('service-template', obj_ids, child_dict)
- self._service_template_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('virtual-DNS')
- if r_class and r_class.generate_default_instance:
- child_obj = VirtualDns(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('virtual-DNS')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('virtual-DNS', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('virtual-DNS', obj_ids, child_dict)
- self._virtual_DNS_create_default_children(child_obj)
-
- pass
- #end _domain_create_default_children
-
- def _domain_delete_default_children(self, parent_dict):
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('virtual-DNS')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('projects')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-project':
- default_child_id = has_info['href'].split('/')[-1]
- self.project_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('virtual-DNS')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('namespaces')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-namespace':
- default_child_id = has_info['href'].split('/')[-1]
- self.namespace_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
-        r_class = self.get_resource_class('service-template')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('service_templates')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-service-template':
- default_child_id = has_info['href'].split('/')[-1]
- self.service_template_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('virtual-DNS')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('virtual_DNSs')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-virtual-DNS':
- default_child_id = has_info['href'].split('/')[-1]
- self.virtual_DNS_http_delete(default_child_id)
- break
-
- pass
- #end _domain_delete_default_children
-
- def global_vrouter_config_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_global_vrouter_config_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'global_vrouter_config':
- abort(404, 'No global-vrouter-config object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'global_vrouter_config', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('global-vrouter-config')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'global_vrouter_config', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'linklocal_services', u'encapsulation_priorities', u'vxlan_network_identifier_mode', u'id_perms', u'display_name']
- references = []
- back_references = [u'global_system_config_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('global-vrouter-config', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'global_vrouter_config', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'global_vrouter_config', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('global-vrouter-config', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_global_vrouter_config_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'global-vrouter-config': rsp_body}
- #end global_vrouter_config_http_get
-
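
Editorial note: every per-object GET above implements the same conditional-read contract: compare the client's If-None-Match header against the stored `id_perms.last_modified`, short-circuit with 304 when they match, and stamp the quoted ETag on every full response. A condensed sketch of just that contract, assuming a Bottle-style `request`/`response` and the `dbe_is_latest` helper used above; the function and `read_fn` names are hypothetical.

    # Sketch of the conditional-GET handling shared by the *_http_get handlers.
    def conditional_read(db_conn, request, response, obj_uuid, read_fn):
        etag = request.headers.get('If-None-Match')
        if etag:
            ok, is_latest = db_conn.dbe_is_latest({'uuid': obj_uuid},
                                                  etag.replace('"', ''))
            if ok and is_latest:
                response.status = 304      # client cache is still current
                return None
        ok, result = read_fn(obj_uuid)     # full read from the config DB
        if not ok:
            return None
        # ETag is the last_modified timestamp, quoted as in the handlers above
        response.set_header('ETag', '"' + result['id_perms']['last_modified'] + '"')
        return result
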
- def global_vrouter_config_http_put(self, id):
- key = 'global-vrouter-config'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_global_vrouter_config_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'global_vrouter_config':
- abort(404, 'No global-vrouter-config object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('linklocal_services')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_linklocal_services = LinklocalServicesTypes(**prop_dict)
- xx_linklocal_services.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_linklocal_services = LinklocalServicesTypes()
- try:
- xx_linklocal_services.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('encapsulation_priorities')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_encapsulation_priorities = EncapsulationPrioritiesType(**prop_dict)
- xx_encapsulation_priorities.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_encapsulation_priorities = EncapsulationPrioritiesType()
- try:
- xx_encapsulation_priorities.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'global_vrouter_config', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'global_vrouter_config', 'http_put', msg)
- abort(code, msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('global-vrouter-config')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'global_vrouter_config', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('global-vrouter-config', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'global_vrouter_config', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('global-vrouter-config', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_global_vrouter_config_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'global-vrouter-config': rsp_body}
- #end global_vrouter_config_http_put
-
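
Editorial note: before any write, each complex property is validated by round-tripping it through its generated type (e.g. `LinklocalServicesTypes`, `IdPermsType`): instantiate the type from the request dict, export it to XML, re-parse, and rebuild, aborting with 400 on failure. A generic sketch of that round trip; it assumes the Python 2 `cStringIO` and lxml `etree` imports used by the surrounding module, and the helper name is hypothetical.

    import cStringIO
    from lxml import etree  # assumed to match the module's etree import

    # Sketch of the export/build round trip used to validate complex
    # properties before a PUT/POST is accepted.
    def validate_property(prop_dict, type_class):
        """Return None on success, or an error string suitable for abort(400, ...)."""
        if not prop_dict:
            return None
        buf = cStringIO.StringIO()
        instance = type_class(**prop_dict)   # build from the request dict
        instance.export(buf)                 # serialize to the schema's XML form
        node = etree.fromstring(buf.getvalue())
        try:
            type_class().build(node)         # re-parse; catches schema violations
        except Exception as e:
            return str(e)
        return None
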
- def global_vrouter_config_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'global_vrouter_config':
- abort(404, 'No global-vrouter-config object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_global_vrouter_config_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'global_system_config_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('global-vrouter-config', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'global_vrouter_config', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'global_vrouter_config', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'global_vrouter_config', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('global-vrouter-config', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('global-vrouter-config')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
-
- # Delete default children first
- self._global_vrouter_config_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'global_vrouter_config', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('global-vrouter-config', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'global_vrouter_config', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_global_vrouter_config_delete', id, read_result)
- except Exception as e:
- pass
-
- #end global_vrouter_config_http_delete
-
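
Editorial note: the create, update, and delete paths above all share one rollback idiom: after each successful state change, push a compensating callable (with its arguments) onto `cleanup_on_failure`; if a later step fails, run the accumulated callables before reporting the error. A minimal standalone sketch of that idiom; the step/undo structure and function name are hypothetical.

    # Sketch of the cleanup_on_failure idiom used by the write paths above.
    def run_with_rollback(steps):
        """steps: list of (do_fn, undo_fn_or_None); do_fn returns (ok, result)."""
        cleanup_on_failure = []
        for do_fn, undo_fn in steps:
            ok, result = do_fn()
            if not ok:
                # undo every step that already succeeded, in the order it
                # was registered, as the handlers above do
                for undo in cleanup_on_failure:
                    undo()
                return (False, result)
            if undo_fn is not None:
                cleanup_on_failure.append(undo_fn)
        return (True, None)
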
- def global_vrouter_configs_http_post(self):
- key = 'global-vrouter-config'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_global_vrouter_config_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('linklocal_services')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_linklocal_services = LinklocalServicesTypes(**prop_dict)
- xx_linklocal_services.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_linklocal_services = LinklocalServicesTypes()
- try:
- xx_linklocal_services.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('encapsulation_priorities')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_encapsulation_priorities = EncapsulationPrioritiesType(**prop_dict)
- xx_encapsulation_priorities.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_encapsulation_priorities = EncapsulationPrioritiesType()
- try:
- xx_encapsulation_priorities.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'global-vrouter-config', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'global_vrouter_config', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'global_vrouter_config', 'http_post', err_msg)
- abort(400, err_msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('global-vrouter-config', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'global_vrouter_config', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['global_vrouter_config', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('global-vrouter-config')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'global_vrouter_config', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('global-vrouter-config', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'global_vrouter_config', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('global-vrouter-config', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_global_vrouter_config_create', obj_dict)
- except Exception as e:
- pass
-
- return {'global-vrouter-config': rsp_body}
- #end global_vrouter_configs_http_post
-
- def global_vrouter_configs_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'global_vrouter_configs', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('global-vrouter-config', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'global_vrouter_configs', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'global-vrouter-configs': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('global-vrouter-config', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('global-vrouter-config', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('global-vrouter-config', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'linklocal_services', u'encapsulation_priorities', u'vxlan_network_identifier_mode', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('global-vrouter-config', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('global-vrouter-config', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'global-vrouter-config': obj_dict})
-
- return {'global-vrouter-configs': obj_dicts}
- #end global_vrouter_configs_http_get
-
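
Editorial note: the collection GETs parse the `filters` query parameter as comma-separated `field==value` pairs into parallel name/value lists for `dbe_list`, aborting with 400 on malformed input. A standalone sketch of that parsing; the function name is hypothetical and it raises instead of calling `abort`.

    # Sketch of the filters-parameter parsing used by the collection handlers,
    # e.g. "display_name==foo,uuid==1234" ->
    # {'field_names': ['display_name', 'uuid'], 'field_values': ['foo', '1234']}
    def parse_filters(filter_params):
        if not filter_params:
            return None
        try:
            pairs = [ff.split('==') for ff in filter_params.split(',')]
            return {'field_names': [name for name, _ in pairs],
                    'field_values': [value for _, value in pairs]}
        except Exception:
            raise ValueError('Invalid filter ' + filter_params)
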
- def _global_vrouter_config_create_default_children(self, parent_obj):
- pass
- #end _global_vrouter_config_create_default_children
-
- def _global_vrouter_config_delete_default_children(self, parent_dict):
- pass
- #end _global_vrouter_config_delete_default_children
-
- def instance_ip_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_instance_ip_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'instance_ip':
- abort(404, 'No instance-ip object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'instance_ip', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('instance-ip')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'instance_ip', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'instance_ip_address', u'instance_ip_family', u'instance_ip_mode', u'subnet_uuid', u'id_perms', u'display_name']
- references = [u'virtual_network_refs', 'virtual_machine_interface_refs']
- back_references = []
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('instance-ip', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'instance_ip', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'instance_ip', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('instance-ip', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_instance_ip_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'instance-ip': rsp_body}
- #end instance_ip_http_get
-
- def instance_ip_http_put(self, id):
- key = 'instance-ip'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_instance_ip_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'instance_ip':
- abort(404, 'No instance-ip object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'instance_ip', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'instance_ip', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = [u'virtual_network', 'virtual_machine_interface']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('instance-ip')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'instance_ip', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('instance-ip', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'instance_ip', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('instance-ip', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_instance_ip_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'instance-ip': rsp_body}
- #end instance_ip_http_put
-
- def instance_ip_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'instance_ip':
- abort(404, 'No instance-ip object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_instance_ip_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = []
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('instance-ip', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'instance_ip', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'instance_ip', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'instance_ip', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('instance-ip', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('instance-ip')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
-
- # Delete default children first
- self._instance_ip_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'instance_ip', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('instance-ip', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'instance_ip', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_instance_ip_delete', id, read_result)
- except Exception as e:
- pass
-
- #end instance_ip_http_delete
-
- def instance_ips_http_post(self):
- key = 'instance-ip'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_instance_ip_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'instance-ip', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'instance_ip', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # Validate perms
- objtype_list = [u'virtual_network', 'virtual_machine_interface']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('instance-ip', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'instance_ip', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['instance_ip', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('instance-ip')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'instance_ip', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('instance-ip', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'instance_ip', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('instance-ip', obj_ids['uuid'])
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_instance_ip_create', obj_dict)
- except Exception as e:
- pass
-
- return {'instance-ip': rsp_body}
- #end instance_ips_http_post
-
- def instance_ips_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'instance_ips', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('instance-ip', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'instance_ips', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'instance-ips': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('instance-ip', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('instance-ip', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('instance-ip', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'instance_ip_address', u'instance_ip_family', u'instance_ip_mode', u'subnet_uuid', u'id_perms', u'display_name'] + [u'virtual_network_refs', 'virtual_machine_interface_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('instance-ip', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('instance-ip', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'instance-ip': obj_dict})
-
- return {'instance-ips': obj_dicts}
- #end instance_ips_http_get
-
- def _instance_ip_create_default_children(self, parent_obj):
- pass
- #end _instance_ip_create_default_children
-
- def _instance_ip_delete_default_children(self, parent_dict):
- pass
- #end _instance_ip_delete_default_children
-
- def network_policy_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_network_policy_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'network_policy':
- abort(404, 'No network-policy object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'network_policy', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('network-policy')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'network_policy', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'network_policy_entries', u'id_perms', u'display_name']
- references = []
- back_references = [u'project_back_refs', u'virtual_network_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('network-policy', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'network_policy', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'network_policy', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('network-policy', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_network_policy_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'network-policy': rsp_body}
- #end network_policy_http_get
-
- def network_policy_http_put(self, id):
- key = 'network-policy'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_network_policy_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'network_policy':
- abort(404, 'No network-policy object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('network_policy_entries')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_network_policy_entries = PolicyEntriesType(**prop_dict)
- xx_network_policy_entries.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_network_policy_entries = PolicyEntriesType()
- try:
- xx_network_policy_entries.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'network_policy', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'network_policy', 'http_put', msg)
- abort(code, msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('network-policy')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'network_policy', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('network-policy', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'network_policy', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('network-policy', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_network_policy_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'network-policy': rsp_body}
- #end network_policy_http_put
-
- def network_policy_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'network_policy':
- abort(404, 'No network-policy object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_network_policy_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'project_back_refs', u'virtual_network_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('network-policy', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'network_policy', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'network_policy', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'network_policy', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('network-policy', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('network-policy')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- virtual_network_back_refs = read_result.get('virtual_network_back_refs', None)
- if virtual_network_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_network_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'network_policy', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._network_policy_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'network_policy', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('network-policy', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'network_policy', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_network_policy_delete', id, read_result)
- except Exception as e:
- pass
-
- #end network_policy_http_delete
-
- def network_policys_http_post(self):
- key = 'network-policy'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_network_policy_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('network_policy_entries')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_network_policy_entries = PolicyEntriesType(**prop_dict)
- xx_network_policy_entries.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_network_policy_entries = PolicyEntriesType()
- try:
- xx_network_policy_entries.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'network-policy', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'network_policy', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'network_policy', 'http_post', err_msg)
- abort(400, err_msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('network-policy', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'network_policy', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['network_policy', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('network-policy')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'network_policy', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('network-policy', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'network_policy', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('network-policy', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_network_policy_create', obj_dict)
- except Exception as e:
- pass
-
- return {'network-policy': rsp_body}
- #end network_policys_http_post
-
- def network_policys_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'network_policys', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('network-policy', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'network_policys', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'network-policys': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('network-policy', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('network-policy', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('network-policy', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'network_policy_entries', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('network-policy', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('network-policy', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'network-policy': obj_dict})
-
- return {'network-policys': obj_dicts}
- #end network_policys_http_get
-
- def _network_policy_create_default_children(self, parent_obj):
- pass
- #end _network_policy_create_default_children
-
- def _network_policy_delete_default_children(self, parent_dict):
- pass
- #end _network_policy_delete_default_children
-
- def loadbalancer_pool_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_pool_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'loadbalancer_pool':
- abort(404, 'No loadbalancer-pool object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'loadbalancer_pool', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('loadbalancer-pool')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'loadbalancer_pool', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'loadbalancer_pool_properties', u'loadbalancer_pool_provider', u'id_perms', u'display_name']
- references = [u'service_instance_refs', 'virtual_machine_interface_refs', u'service_appliance_set_refs', u'loadbalancer_healthmonitor_refs']
- back_references = [u'project_back_refs', u'virtual_ip_back_refs']
- children = [u'loadbalancer_members']
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('loadbalancer-pool', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'loadbalancer_pool', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'loadbalancer_pool', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('loadbalancer-pool', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_pool_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'loadbalancer-pool': rsp_body}
- #end loadbalancer_pool_http_get
-
- def loadbalancer_pool_http_put(self, id):
- key = 'loadbalancer-pool'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_pool_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'loadbalancer_pool':
- abort(404, 'No loadbalancer-pool object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('loadbalancer_pool_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_loadbalancer_pool_properties = LoadbalancerPoolType(**prop_dict)
- xx_loadbalancer_pool_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_loadbalancer_pool_properties = LoadbalancerPoolType()
- try:
- xx_loadbalancer_pool_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'loadbalancer_pool', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'loadbalancer_pool', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = [u'service_instance', 'virtual_machine_interface', u'service_appliance_set', u'loadbalancer_member', u'loadbalancer_healthmonitor']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('loadbalancer-pool')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'loadbalancer_pool', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('loadbalancer-pool', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'loadbalancer_pool', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('loadbalancer-pool', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_pool_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'loadbalancer-pool': rsp_body}
- #end loadbalancer_pool_http_put
-
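The cleanup_on_failure convention used in the PUT handler above recurs in every mutating handler of this file: each step that changes state registers an undo callable, and all registered undos run if a later step fails. A compact standalone restatement of that convention, with assumed names:

def run_with_rollback(steps):
    """steps: list of (do, undo) pairs. Each do() returns (ok, result); undo is a
    zero-argument callable or None. Mirrors the handlers above: registered undo
    callables are run, in registration order, when a later step fails."""
    cleanup_on_failure = []
    for do, undo in steps:
        ok, result = do()
        if not ok:
            for undo_cb in cleanup_on_failure:
                undo_cb()
            return False, result
        if undo is not None:
            cleanup_on_failure.append(undo)
    return True, None

# In the handlers the pairs are, for example, the resource-class http_put /
# http_post_collection hook with its *_fail counterpart, and dbe_alloc with
# dbe_release; the final dbe_update/dbe_create step registers no undo.
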
- def loadbalancer_pool_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'loadbalancer_pool':
- abort(404, 'No loadbalancer-pool object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_pool_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'project_back_refs', u'virtual_ip_back_refs']
- children = [u'loadbalancer_members']
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('loadbalancer-pool', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'loadbalancer_pool', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'loadbalancer_pool', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'loadbalancer_pool', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('loadbalancer-pool', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('loadbalancer-pool')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- loadbalancer_members = read_result.get('loadbalancer_members', None)
- if loadbalancer_members:
- has_infos = read_result['loadbalancer_members']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-loadbalancer-member')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'loadbalancer_pool', 'http_delete', err_msg)
- abort(409, err_msg)
-
- virtual_ip_back_refs = read_result.get('virtual_ip_back_refs', None)
- if virtual_ip_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_ip_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'loadbalancer_pool', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._loadbalancer_pool_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'loadbalancer_pool', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('loadbalancer-pool', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'loadbalancer_pool', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_pool_delete', id, read_result)
- except Exception as e:
- pass
-
- #end loadbalancer_pool_http_delete
-
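The delete handler above refuses to remove a pool while non-default children or back-references remain, answering 409 Conflict. A minimal standalone sketch of that guard (function name assumed) over the same read_result layout returned by dbe_read:

def check_deletable(read_result, children_field, default_child_name, back_ref_fields):
    has_infos = read_result.get(children_field) or []
    non_default = [h for h in has_infos if h['to'][-1] != default_child_name]
    if non_default:
        return False, 'Children %s still exist' % ', '.join(
            h['href'] for h in non_default)
    for field in back_ref_fields:
        back_refs = read_result.get(field) or []
        if back_refs:
            return False, 'Back-References from %s still exist' % ', '.join(
                b['href'] for b in back_refs)
    return True, None

# e.g. for a loadbalancer-pool:
# check_deletable(read_result, 'loadbalancer_members',
#                 'default-loadbalancer-member', ['virtual_ip_back_refs'])
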
- def loadbalancer_pools_http_post(self):
- key = 'loadbalancer-pool'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_pool_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('loadbalancer_pool_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_loadbalancer_pool_properties = LoadbalancerPoolType(**prop_dict)
- xx_loadbalancer_pool_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_loadbalancer_pool_properties = LoadbalancerPoolType()
- try:
- xx_loadbalancer_pool_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'loadbalancer-pool', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'loadbalancer_pool', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'loadbalancer_pool', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = [u'service_instance', 'virtual_machine_interface', u'service_appliance_set', u'loadbalancer_member', u'loadbalancer_healthmonitor']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('loadbalancer-pool', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'loadbalancer_pool', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['loadbalancer_pool', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('loadbalancer-pool')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'loadbalancer_pool', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('loadbalancer-pool', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'loadbalancer_pool', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('loadbalancer-pool', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_pool_create', obj_dict)
- except Exception as e:
- pass
-
- return {'loadbalancer-pool': rsp_body}
- #end loadbalancer_pools_http_post
-
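For reference, a hedged client-side sketch of what the collection POST above consumes and returns. The handler itself only requires fq_name (plus optional parent_type, properties and *_refs); the endpoint path, server address, parent type and the example property are assumptions.

import json
import requests  # assumed HTTP client

API = 'http://api-server:8082'   # assumed API server address
body = {
    'loadbalancer-pool': {
        'fq_name': ['default-domain', 'demo-project', 'web-pool'],  # hypothetical names
        'parent_type': 'project',                                   # hypothetical parent type
        'loadbalancer_pool_properties': {'protocol': 'HTTP'},       # hypothetical property
    }
}
rsp = requests.post('%s/loadbalancer-pools' % API,   # assumed collection path
                    data=json.dumps(body),
                    headers={'Content-Type': 'application/json'})
created = rsp.json()['loadbalancer-pool']
# The handler echoes back name, fq_name, uuid, href and, for non config-root
# children, parent_uuid / parent_href.
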
- def loadbalancer_pools_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'loadbalancer_pools', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('loadbalancer-pool', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'loadbalancer_pools', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'loadbalancer-pools': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('loadbalancer-pool', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('loadbalancer-pool', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('loadbalancer-pool', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'loadbalancer_pool_properties', u'loadbalancer_pool_provider', u'id_perms', u'display_name'] + [u'service_instance_refs', 'virtual_machine_interface_refs', u'service_appliance_set_refs', u'loadbalancer_healthmonitor_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('loadbalancer-pool', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('loadbalancer-pool', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'loadbalancer-pool': obj_dict})
-
- return {'loadbalancer-pools': obj_dicts}
- #end loadbalancer_pools_http_get
-
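The filters query parameter parsed in the collection GET above is a comma-separated list of field==value pairs, turned into parallel name/value lists for dbe_list. A standalone restatement (function name and example value assumed):

def parse_filters(filter_params):
    if not filter_params:
        return None
    pairs = [ff.split('==', 1) for ff in filter_params.split(',')]
    return {'field_names': [p[0] for p in pairs],
            'field_values': [p[1] for p in pairs]}

assert parse_filters('display_name==web-pool') == {
    'field_names': ['display_name'], 'field_values': ['web-pool']}
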
- def _loadbalancer_pool_create_default_children(self, parent_obj):
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('loadbalancer-member')
- if r_class and r_class.generate_default_instance:
- child_obj = LoadbalancerMember(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('loadbalancer-member')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('loadbalancer-member', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('loadbalancer-member', obj_ids, child_dict)
- self._loadbalancer_member_create_default_children(child_obj)
-
- pass
- #end _loadbalancer_pool_create_default_children
-
- def _loadbalancer_pool_delete_default_children(self, parent_dict):
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('loadbalancer-member')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('loadbalancer_members')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-loadbalancer-member':
- default_child_id = has_info['href'].split('/')[-1]
- self.loadbalancer_member_http_delete(default_child_id)
- break
-
- pass
- #end _loadbalancer_pool_delete_default_children
-
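When the resource class is provisioned with generate_default_instance, the helpers above auto-create and later remove a 'default-loadbalancer-member' child; the delete side locates it by fq_name suffix and takes its uuid from the last component of its href. A minimal sketch of that lookup (function name assumed):

def find_default_child_uuid(parent_dict, children_field, default_name):
    for has_info in parent_dict.get(children_field) or []:
        if has_info['to'][-1] == default_name:
            return has_info['href'].split('/')[-1]
    return None

# find_default_child_uuid(read_result, 'loadbalancer_members',
#                         'default-loadbalancer-member')
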
- def virtual_DNS_record_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_DNS_record_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'virtual_DNS_record':
- abort(404, 'No virtual-DNS-record object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'virtual_DNS_record', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-DNS-record')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'virtual_DNS_record', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'virtual_DNS_record_data', u'id_perms', u'display_name']
- references = []
- back_references = [u'virtual_DNS_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('virtual-DNS-record', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'virtual_DNS_record', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'virtual_DNS_record', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('virtual-DNS-record', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_DNS_record_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'virtual-DNS-record': rsp_body}
- #end virtual_DNS_record_http_get
-
- def virtual_DNS_record_http_put(self, id):
- key = 'virtual-DNS-record'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_DNS_record_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'virtual_DNS_record':
- abort(404, 'No virtual-DNS-record object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('virtual_DNS_record_data')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_DNS_record_data = VirtualDnsRecordType(**prop_dict)
- xx_virtual_DNS_record_data.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_DNS_record_data = VirtualDnsRecordType()
- try:
- xx_virtual_DNS_record_data.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'virtual_DNS_record', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'virtual_DNS_record', 'http_put', msg)
- abort(code, msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('virtual-DNS-record')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'virtual_DNS_record', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('virtual-DNS-record', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'virtual_DNS_record', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('virtual-DNS-record', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_DNS_record_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'virtual-DNS-record': rsp_body}
- #end virtual_DNS_record_http_put
-
- def virtual_DNS_record_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'virtual_DNS_record':
- abort(404, 'No virtual-DNS-record object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_DNS_record_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'virtual_DNS_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('virtual-DNS-record', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'virtual_DNS_record', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'virtual_DNS_record', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'virtual_DNS_record', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('virtual-DNS-record', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-DNS-record')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
-
- # Delete default children first
- self._virtual_DNS_record_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'virtual_DNS_record', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('virtual-DNS-record', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'virtual_DNS_record', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_DNS_record_delete', id, read_result)
- except Exception as e:
- pass
-
- #end virtual_DNS_record_http_delete
-
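Every handler in this file brackets its work with best-effort pre_*/post_* extension callbacks, and any exception raised by an extension is swallowed so a faulty plugin cannot fail the API request. A generic sketch of that convention with a stand-in extension manager (names and behaviour assumed; the real manager is whatever self._extension_mgrs['resourceApi'] was initialised with):

class ExtensionManagerStub(object):
    """Stand-in for the 'resourceApi' extension manager (assumed behaviour)."""
    def __init__(self, extensions):
        self._extensions = extensions

    def map_method(self, method_name, *args, **kwargs):
        for ext in self._extensions:
            hook = getattr(ext, method_name, None)
            if hook:
                hook(*args, **kwargs)

def notify_extensions(ext_mgr, method_name, *args):
    try:
        ext_mgr.map_method(method_name, *args)
    except Exception:
        pass  # hooks are advisory: a failing extension never fails the request
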
- def virtual_DNS_records_http_post(self):
- key = 'virtual-DNS-record'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_DNS_record_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('virtual_DNS_record_data')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_DNS_record_data = VirtualDnsRecordType(**prop_dict)
- xx_virtual_DNS_record_data.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_DNS_record_data = VirtualDnsRecordType()
- try:
- xx_virtual_DNS_record_data.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'virtual-DNS-record', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'virtual_DNS_record', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_DNS_record', 'http_post', err_msg)
- abort(400, err_msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('virtual-DNS-record', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'virtual_DNS_record', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['virtual_DNS_record', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-DNS-record')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_DNS_record', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('virtual-DNS-record', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_DNS_record', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('virtual-DNS-record', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_DNS_record_create', obj_dict)
- except Exception as e:
- pass
-
- return {'virtual-DNS-record': rsp_body}
- #end virtual_DNS_records_http_post
-
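The collection POST above drops a parent_type of config-root and otherwise derives the parent's fq_name from the object's own fq_name minus its last component, requiring that parent to exist and be writable. A small restatement of that derivation (the parent type and fq_name values in the example are hypothetical):

def parent_fq_name_of(obj_dict):
    if obj_dict.get('parent_type') in (None, 'config-root'):
        return None, None
    return obj_dict['parent_type'], obj_dict['fq_name'][:-1]

assert parent_fq_name_of({'parent_type': 'virtual-DNS',
                          'fq_name': ['default-domain', 'vdns1', 'rec1']}) == (
    'virtual-DNS', ['default-domain', 'vdns1'])
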
- def virtual_DNS_records_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'virtual_DNS_records', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('virtual-DNS-record', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'virtual_DNS_records', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'virtual-DNS-records': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('virtual-DNS-record', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('virtual-DNS-record', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('virtual-DNS-record', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'virtual_DNS_record_data', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('virtual-DNS-record', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('virtual-DNS-record', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'virtual-DNS-record': obj_dict})
-
- return {'virtual-DNS-records': obj_dicts}
- #end virtual_DNS_records_http_get
-
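The gevent.sleep(0) calls inside the listing loops above are cooperative yields: the server is gevent-based, and a long loop over results would otherwise starve other greenlets such as the zookeeper heartbeat mentioned in the comments. A minimal illustration with assumed names:

import gevent

def build_summaries(fq_names_uuids, make_url):
    summaries = []
    for fq_name, obj_uuid in fq_names_uuids:
        gevent.sleep(0)  # yield so other greenlets (e.g. zk heartbeat) can run
        summaries.append({'uuid': obj_uuid,
                          'href': make_url(obj_uuid),
                          'fq_name': fq_name})
    return summaries
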
- def _virtual_DNS_record_create_default_children(self, parent_obj):
- pass
- #end _virtual_DNS_record_create_default_children
-
- def _virtual_DNS_record_delete_default_children(self, parent_dict):
- pass
- #end _virtual_DNS_record_delete_default_children
-
- def route_target_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_route_target_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'route_target':
- abort(404, 'No route-target object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'route_target', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('route-target')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'route_target', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'id_perms', u'display_name']
- references = []
- back_references = [u'logical_router_back_refs', 'routing_instance_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('route-target', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'route_target', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'route_target', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('route-target', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_route_target_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'route-target': rsp_body}
- #end route_target_http_get
-
- def route_target_http_put(self, id):
- key = 'route-target'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_route_target_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'route_target':
- abort(404, 'No route-target object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'route_target', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'route_target', 'http_put', msg)
- abort(code, msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('route-target')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'route_target', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('route-target', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'route_target', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('route-target', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_route_target_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'route-target': rsp_body}
- #end route_target_http_put
-
- def route_target_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'route_target':
- abort(404, 'No route-target object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_route_target_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'logical_router_back_refs', 'routing_instance_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('route-target', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'route_target', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'route_target', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'route_target', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('route-target', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('route-target')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- logical_router_back_refs = read_result.get('logical_router_back_refs', None)
- if logical_router_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['logical_router_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'route_target', 'http_delete', err_msg)
- abort(409, err_msg)
-
- routing_instance_back_refs = read_result.get('routing_instance_back_refs', None)
- if routing_instance_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['routing_instance_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'route_target', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._route_target_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'route_target', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('route-target', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'route_target', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_route_target_delete', id, read_result)
- except Exception as e:
- pass
-
- #end route_target_http_delete
-
- def route_targets_http_post(self):
- key = 'route-target'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_route_target_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'route-target', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'route_target', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('route-target', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'route_target', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['route_target', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('route-target')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'route_target', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('route-target', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'route_target', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('route-target', obj_ids['uuid'])
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_route_target_create', obj_dict)
- except Exception as e:
- pass
-
- return {'route-target': rsp_body}
- #end route_targets_http_post
-
- def route_targets_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'route_targets', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('route-target', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'route_targets', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'route-targets': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('route-target', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('route-target', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('route-target', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('route-target', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('route-target', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'route-target': obj_dict})
-
- return {'route-targets': obj_dicts}
- #end route_targets_http_get
-
- def _route_target_create_default_children(self, parent_obj):
- pass
- #end _route_target_create_default_children
-
- def _route_target_delete_default_children(self, parent_dict):
- pass
- #end _route_target_delete_default_children
-
- def floating_ip_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_floating_ip_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'floating_ip':
- abort(404, 'No floating-ip object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'floating_ip', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('floating-ip')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'floating_ip', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'floating_ip_address', u'floating_ip_is_virtual_ip', u'floating_ip_fixed_ip_address', u'floating_ip_address_family', u'id_perms', u'display_name']
- references = [u'project_refs', 'virtual_machine_interface_refs']
- back_references = [u'floating_ip_pool_back_refs', 'customer_attachment_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('floating-ip', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'floating_ip', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'floating_ip', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('floating-ip', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_floating_ip_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'floating-ip': rsp_body}
- #end floating_ip_http_get
-
- def floating_ip_http_put(self, id):
- key = 'floating-ip'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_floating_ip_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'floating_ip':
- abort(404, 'No floating-ip object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'floating_ip', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'floating_ip', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = [u'project', 'virtual_machine_interface']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('floating-ip')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'floating_ip', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('floating-ip', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'floating_ip', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('floating-ip', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_floating_ip_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'floating-ip': rsp_body}
- #end floating_ip_http_put
-
- def floating_ip_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'floating_ip':
- abort(404, 'No floating-ip object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_floating_ip_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'floating_ip_pool_back_refs', 'customer_attachment_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('floating-ip', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'floating_ip', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'floating_ip', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'floating_ip', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('floating-ip', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
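- # The IF-MAP identifiers (the object's own imid and, for a child object,
- # its parent's imid) are handed to dbe_delete, presumably so the published
- # IF-MAP graph entries are removed along with the DB record.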
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('floating-ip')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
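- # The delete is refused with 409 Conflict while non-default children or
- # back-references still point at this object; the offending hrefs are
- # listed in the error message.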
- customer_attachment_back_refs = read_result.get('customer_attachment_back_refs', None)
- if customer_attachment_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['customer_attachment_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'floating_ip', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._floating_ip_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'floating_ip', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('floating-ip', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'floating_ip', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_floating_ip_delete', id, read_result)
- except Exception as e:
- pass
-
- #end floating_ip_http_delete
-
- def floating_ips_http_post(self):
- key = 'floating-ip'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_floating_ip_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'floating-ip', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'floating_ip', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'floating_ip', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = [u'project', 'virtual_machine_interface']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('floating-ip', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'floating_ip', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['floating_ip', fq_name]))
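- # dbe_release is registered for cleanup so the fq_name/uuid mapping
- # allocated just above is freed again if any later step of the create fails.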
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('floating-ip')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'floating_ip', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('floating-ip', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'floating_ip', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('floating-ip', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_floating_ip_create', obj_dict)
- except Exception as e:
- pass
-
- return {'floating-ip': rsp_body}
- #end floating_ips_http_post
-
- def floating_ips_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
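- # Illustrative query forms (values are examples only):
- #   GET /floating-ips?parent_id=<uuid>              anchor by parent
- #   GET /floating-ips?back_ref_id=<uuid1>,<uuid2>   anchor by back-refs
- #   GET /floating-ips?detail=true&fields=floating_ip_address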
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'floating_ips', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
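- # The filters parameter is a comma-separated list of field==value pairs,
- # e.g. filters=floating_ip_address==10.0.0.3 (illustrative value); it is
- # split into parallel field_names/field_values lists for dbe_list.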
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('floating-ip', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'floating_ips', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'floating-ips': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('floating-ip', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('floating-ip', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('floating-ip', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'floating_ip_address', u'floating_ip_is_virtual_ip', u'floating_ip_fixed_ip_address', u'floating_ip_address_family', u'id_perms', u'display_name'] + [u'project_refs', 'virtual_machine_interface_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('floating-ip', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('floating-ip', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'floating-ip': obj_dict})
-
- return {'floating-ips': obj_dicts}
- #end floating_ips_http_get
-
- def _floating_ip_create_default_children(self, parent_obj):
- pass
- #end _floating_ip_create_default_children
-
- def _floating_ip_delete_default_children(self, parent_dict):
- pass
- #end _floating_ip_delete_default_children
-
- def floating_ip_pool_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_floating_ip_pool_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'floating_ip_pool':
- abort(404, 'No floating-ip-pool object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'floating_ip_pool', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('floating-ip-pool')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'floating_ip_pool', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'floating_ip_pool_prefixes', u'id_perms', u'display_name']
- references = []
- back_references = [u'virtual_network_back_refs', u'project_back_refs']
- children = [u'floating_ips']
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
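- # Default reads return properties plus forward references; back-references
- # and children are added as well unless the caller passes exclude_back_refs
- # or exclude_children, and an explicit fields= list overrides all of this.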
-
- (ok, result) = db_conn.dbe_read('floating-ip-pool', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'floating_ip_pool', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'floating_ip_pool', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('floating-ip-pool', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_floating_ip_pool_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'floating-ip-pool': rsp_body}
- #end floating_ip_pool_http_get
-
- def floating_ip_pool_http_put(self, id):
- key = 'floating-ip-pool'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_floating_ip_pool_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'floating_ip_pool':
- abort(404, 'No floating-ip-pool object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('floating_ip_pool_prefixes')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_floating_ip_pool_prefixes = FloatingIpPoolType(**prop_dict)
- xx_floating_ip_pool_prefixes.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_floating_ip_pool_prefixes = FloatingIpPoolType()
- try:
- xx_floating_ip_pool_prefixes.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'floating_ip_pool', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'floating_ip_pool', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = [u'floating_ip']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('floating-ip-pool')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'floating_ip_pool', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('floating-ip-pool', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'floating_ip_pool', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('floating-ip-pool', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_floating_ip_pool_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'floating-ip-pool': rsp_body}
- #end floating_ip_pool_http_put
-
- def floating_ip_pool_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'floating_ip_pool':
- abort(404, 'No floating-ip-pool object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_floating_ip_pool_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'virtual_network_back_refs', u'project_back_refs']
- children = [u'floating_ips']
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('floating-ip-pool', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'floating_ip_pool', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'floating_ip_pool', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'floating_ip_pool', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('floating-ip-pool', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('floating-ip-pool')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- floating_ips = read_result.get('floating_ips', None)
- if floating_ips:
- has_infos = read_result['floating_ips']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-floating-ip')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'floating_ip_pool', 'http_delete', err_msg)
- abort(409, err_msg)
-
- project_back_refs = read_result.get('project_back_refs', None)
- if project_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['project_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'floating_ip_pool', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._floating_ip_pool_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'floating_ip_pool', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('floating-ip-pool', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'floating_ip_pool', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_floating_ip_pool_delete', id, read_result)
- except Exception as e:
- pass
-
- #end floating_ip_pool_http_delete
-
- def floating_ip_pools_http_post(self):
- key = 'floating-ip-pool'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_floating_ip_pool_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('floating_ip_pool_prefixes')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_floating_ip_pool_prefixes = FloatingIpPoolType(**prop_dict)
- xx_floating_ip_pool_prefixes.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_floating_ip_pool_prefixes = FloatingIpPoolType()
- try:
- xx_floating_ip_pool_prefixes.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'floating-ip-pool', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'floating_ip_pool', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'floating_ip_pool', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = [u'floating_ip']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('floating-ip-pool', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'floating_ip_pool', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['floating_ip_pool', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('floating-ip-pool')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'floating_ip_pool', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('floating-ip-pool', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'floating_ip_pool', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('floating-ip-pool', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_floating_ip_pool_create', obj_dict)
- except Exception as e:
- pass
-
- return {'floating-ip-pool': rsp_body}
- #end floating_ip_pools_http_post
-
- def floating_ip_pools_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'floating_ip_pools', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('floating-ip-pool', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'floating_ip_pools', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'floating-ip-pools': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('floating-ip-pool', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('floating-ip-pool', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('floating-ip-pool', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'floating_ip_pool_prefixes', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('floating-ip-pool', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('floating-ip-pool', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'floating-ip-pool': obj_dict})
-
- return {'floating-ip-pools': obj_dicts}
- #end floating_ip_pools_http_get
-
- def _floating_ip_pool_create_default_children(self, parent_obj):
- # Create a default child only if provisioned for
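- # A default child (named 'default-floating-ip') is only created when the
- # child resource class sets generate_default_instance; the matching
- # _delete_default_children hook removes it again before the parent is deleted.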
- r_class = self.get_resource_class('floating-ip')
- if r_class and r_class.generate_default_instance:
- child_obj = FloatingIp(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('floating-ip')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('floating-ip', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('floating-ip', obj_ids, child_dict)
- self._floating_ip_create_default_children(child_obj)
-
- pass
- #end _floating_ip_pool_create_default_children
-
- def _floating_ip_pool_delete_default_children(self, parent_dict):
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('floating-ip')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('floating_ips')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-floating-ip':
- default_child_id = has_info['href'].split('/')[-1]
- self.floating_ip_http_delete(default_child_id)
- break
-
- pass
- #end _floating_ip_pool_delete_default_children
-
- def physical_router_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_physical_router_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'physical_router':
- abort(404, 'No physical-router object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'physical_router', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('physical-router')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'physical_router', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'physical_router_management_ip', u'physical_router_dataplane_ip', u'physical_router_vendor_name', u'physical_router_product_name', u'physical_router_vnc_managed', u'physical_router_user_credentials', u'physical_router_snmp_credentials', u'physical_router_junos_service_ports', u'id_perms', u'display_name']
- references = ['virtual_router_refs', 'bgp_router_refs', u'virtual_network_refs']
- back_references = [u'global_system_config_back_refs']
- children = [u'physical_interfaces', u'logical_interfaces']
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('physical-router', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'physical_router', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'physical_router', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('physical-router', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_physical_router_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'physical-router': rsp_body}
- #end physical_router_http_get
-
- def physical_router_http_put(self, id):
- key = 'physical-router'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_physical_router_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'physical_router':
- abort(404, 'No physical-router object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('physical_router_user_credentials')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_physical_router_user_credentials = UserCredentials(**prop_dict)
- xx_physical_router_user_credentials.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_physical_router_user_credentials = UserCredentials()
- try:
- xx_physical_router_user_credentials.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('physical_router_snmp_credentials')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_physical_router_snmp_credentials = SNMPCredentials(**prop_dict)
- xx_physical_router_snmp_credentials.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_physical_router_snmp_credentials = SNMPCredentials()
- try:
- xx_physical_router_snmp_credentials.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('physical_router_junos_service_ports')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_physical_router_junos_service_ports = JunosServicePorts(**prop_dict)
- xx_physical_router_junos_service_ports.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_physical_router_junos_service_ports = JunosServicePorts()
- try:
- xx_physical_router_junos_service_ports.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'physical_router', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'physical_router', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = ['virtual_router', 'bgp_router', u'virtual_network', u'physical_interface', u'logical_interface']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('physical-router')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'physical_router', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('physical-router', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'physical_router', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('physical-router', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_physical_router_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'physical-router': rsp_body}
- #end physical_router_http_put
-
- def physical_router_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'physical_router':
- abort(404, 'No physical-router object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_physical_router_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'global_system_config_back_refs']
- children = [u'physical_interfaces', u'logical_interfaces']
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('physical-router', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'physical_router', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'physical_router', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'physical_router', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('physical-router', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('physical-router')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- physical_interfaces = read_result.get('physical_interfaces', None)
- if physical_interfaces:
- has_infos = read_result['physical_interfaces']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-physical-interface')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'physical_router', 'http_delete', err_msg)
- abort(409, err_msg)
-
- logical_interfaces = read_result.get('logical_interfaces', None)
- if logical_interfaces:
- has_infos = read_result['logical_interfaces']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-logical-interface')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'physical_router', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._physical_router_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'physical_router', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('physical-router', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'physical_router', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_physical_router_delete', id, read_result)
- except Exception as e:
- pass
-
- #end physical_router_http_delete
-
- def physical_routers_http_post(self):
- key = 'physical-router'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_physical_router_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('physical_router_user_credentials')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_physical_router_user_credentials = UserCredentials(**prop_dict)
- xx_physical_router_user_credentials.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_physical_router_user_credentials = UserCredentials()
- try:
- xx_physical_router_user_credentials.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('physical_router_snmp_credentials')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_physical_router_snmp_credentials = SNMPCredentials(**prop_dict)
- xx_physical_router_snmp_credentials.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_physical_router_snmp_credentials = SNMPCredentials()
- try:
- xx_physical_router_snmp_credentials.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('physical_router_junos_service_ports')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_physical_router_junos_service_ports = JunosServicePorts(**prop_dict)
- xx_physical_router_junos_service_ports.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_physical_router_junos_service_ports = JunosServicePorts()
- try:
- xx_physical_router_junos_service_ports.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'physical-router', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'physical_router', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'physical_router', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = ['virtual_router', 'bgp_router', u'virtual_network', u'physical_interface', u'logical_interface']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('physical-router', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'physical_router', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['physical_router', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('physical-router')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'physical_router', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('physical-router', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'physical_router', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('physical-router', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_physical_router_create', obj_dict)
- except Exception as e:
- pass
-
- return {'physical-router': rsp_body}
- #end physical_routers_http_post
-
- def physical_routers_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'physical_routers', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('physical-router', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'physical_routers', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'physical-routers': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('physical-router', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('physical-router', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('physical-router', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'physical_router_management_ip', u'physical_router_dataplane_ip', u'physical_router_vendor_name', u'physical_router_product_name', u'physical_router_vnc_managed', u'physical_router_user_credentials', u'physical_router_snmp_credentials', u'physical_router_junos_service_ports', u'id_perms', u'display_name'] + ['virtual_router_refs', 'bgp_router_refs', u'virtual_network_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('physical-router', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('physical-router', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'physical-router': obj_dict})
-
- return {'physical-routers': obj_dicts}
- #end physical_routers_http_get
-
- def _physical_router_create_default_children(self, parent_obj):
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('physical-interface')
- if r_class and r_class.generate_default_instance:
- child_obj = PhysicalInterface(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('physical-interface')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('physical-interface', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('physical-interface', obj_ids, child_dict)
- self._physical_interface_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('logical-interface')
- if r_class and r_class.generate_default_instance:
- child_obj = LogicalInterface(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('logical-interface')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('logical-interface', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('logical-interface', obj_ids, child_dict)
- self._logical_interface_create_default_children(child_obj)
-
- pass
- #end _physical_router_create_default_children
-
- def _physical_router_delete_default_children(self, parent_dict):
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('physical-interface')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('physical_interfaces')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-physical-interface':
- default_child_id = has_info['href'].split('/')[-1]
- self.physical_interface_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('logical-interface')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('logical_interfaces')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-logical-interface':
- default_child_id = has_info['href'].split('/')[-1]
- self.logical_interface_http_delete(default_child_id)
- break
-
- pass
- #end _physical_router_delete_default_children
-
- def bgp_router_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_bgp_router_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'bgp_router':
- abort(404, 'No bgp-router object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'bgp_router', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('bgp-router')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'bgp_router', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'bgp_router_parameters', u'id_perms', u'display_name']
- references = ['bgp_router_refs']
- back_references = [u'global_system_config_back_refs', u'physical_router_back_refs', 'virtual_router_back_refs', 'routing_instance_back_refs', 'bgp_router_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('bgp-router', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'bgp_router', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'bgp_router', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('bgp-router', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_bgp_router_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'bgp-router': rsp_body}
- #end bgp_router_http_get
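The etag branch above is the server half of a conditional read: the ETag response header is set from id_perms['last_modified'], and a request carrying the same value in If-None-Match is answered with 304 and no body. A client-side sketch of that exchange, assuming the requests library, an assumed /bgp-router/<uuid> route, and placeholder address and uuid (authentication headers omitted):

# Sketch only: conditional read against the handler above.
import requests

BASE = 'http://api-server:8082'                     # placeholder address
uuid = 'hypothetical-bgp-router-uuid'               # placeholder uuid

first = requests.get('%s/bgp-router/%s' % (BASE, uuid))
etag = first.headers.get('ETag')                    # derived from id_perms['last_modified']

# Re-read with If-None-Match; an unchanged object comes back as 304 with an
# empty body, so the cached copy can be reused.
second = requests.get('%s/bgp-router/%s' % (BASE, uuid),
                      headers={'If-None-Match': etag})
if second.status_code == 304:
    body = first.json()['bgp-router']
else:
    body = second.json()['bgp-router']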
-
- def bgp_router_http_put(self, id):
- key = 'bgp-router'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_bgp_router_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'bgp_router':
- abort(404, 'No bgp-router object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('bgp_router_parameters')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_bgp_router_parameters = BgpRouterParams(**prop_dict)
- xx_bgp_router_parameters.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_bgp_router_parameters = BgpRouterParams()
- try:
- xx_bgp_router_parameters.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- for ref_dict in obj_dict.get('bgp_router_refs') or []:
- if fq_name == ref_dict['to']:
- abort(404, 'Cannot add reference to self')
- buf = cStringIO.StringIO()
- xx_bgp_router = BgpPeeringAttributes(**ref_dict['attr'])
- xx_bgp_router.export(buf)
- node = etree.fromstring(buf.getvalue())
- try:
- xx_bgp_router.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'bgp_router', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'bgp_router', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = ['bgp_router']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('bgp-router')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'bgp_router', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('bgp-router', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'bgp_router', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('bgp-router', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_bgp_router_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'bgp-router': rsp_body}
- #end bgp_router_http_put
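Every mutating handler above builds a cleanup_on_failure list: each completed step may register an undo callable with its arguments, and if a later step fails all registered undos are run before the error is reported. The same idea in isolation, as a small generic sketch (function names and the toy steps are illustrative only):

# Sketch only: the rollback-list pattern used by the PUT/POST/DELETE handlers.
def run_with_cleanup(steps):
    # steps: list of (do, undo) pairs; do() returns (ok, result),
    # undo is a (callable, args) pair or None.
    cleanup_on_failure = []
    for do, undo in steps:
        ok, result = do()
        if not ok:
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            return (False, result)
        if undo is not None:
            cleanup_on_failure.append(undo)
    return (True, None)

log = []
steps = [(lambda: (True, None), (log.append, ['release allocated id'])),
         (lambda: (False, 'db write failed'), None)]
outcome = run_with_cleanup(steps)
print(outcome)   # (False, 'db write failed')
print(log)       # ['release allocated id']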
-
- def bgp_router_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'bgp_router':
- abort(404, 'No bgp-router object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_bgp_router_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'global_system_config_back_refs', u'physical_router_back_refs', 'virtual_router_back_refs', 'routing_instance_back_refs', 'bgp_router_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('bgp-router', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'bgp_router', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'bgp_router', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'bgp_router', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('bgp-router', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('bgp-router')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- global_system_config_back_refs = read_result.get('global_system_config_back_refs', None)
- if global_system_config_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['global_system_config_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'bgp_router', 'http_delete', err_msg)
- abort(409, err_msg)
-
- physical_router_back_refs = read_result.get('physical_router_back_refs', None)
- if physical_router_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['physical_router_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'bgp_router', 'http_delete', err_msg)
- abort(409, err_msg)
-
- virtual_router_back_refs = read_result.get('virtual_router_back_refs', None)
- if virtual_router_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_router_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'bgp_router', 'http_delete', err_msg)
- abort(409, err_msg)
-
- bgp_router_back_refs = read_result.get('bgp_router_back_refs', None)
- if bgp_router_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['bgp_router_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'bgp_router', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._bgp_router_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'bgp_router', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('bgp-router', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'bgp_router', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_bgp_router_delete', id, read_result)
- except Exception as e:
- pass
-
- #end bgp_router_http_delete
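The delete handler refuses to remove an object that is still referenced: each *_back_refs list found in the read result produces a 409 listing the referrers' hrefs. Seen from a client that looks like the sketch below (requests library assumed, route, address and uuid are placeholders, authentication omitted):

# Sketch only: what the 409 back-reference guard above looks like to a caller.
import requests

BASE = 'http://api-server:8082'                     # placeholder address
bgp_uuid = 'hypothetical-bgp-router-uuid'

resp = requests.delete('%s/bgp-router/%s' % (BASE, bgp_uuid))
if resp.status_code == 409:
    # e.g. "Back-References from http://.../virtual-router/<uuid> still exist";
    # remove the referring object's bgp_router_refs first, then retry the delete.
    print(resp.text)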
-
- def bgp_routers_http_post(self):
- key = 'bgp-router'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_bgp_router_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('bgp_router_parameters')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_bgp_router_parameters = BgpRouterParams(**prop_dict)
- xx_bgp_router_parameters.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_bgp_router_parameters = BgpRouterParams()
- try:
- xx_bgp_router_parameters.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- for ref_dict in obj_dict.get('bgp_router_refs') or []:
- if fq_name == ref_dict['to']:
- abort(404, 'Cannot add reference to self')
- buf = cStringIO.StringIO()
- xx_bgp_router = BgpPeeringAttributes(**ref_dict['attr'])
- xx_bgp_router.export(buf)
- node = etree.fromstring(buf.getvalue())
- try:
- xx_bgp_router.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'bgp-router', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'bgp_router', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'bgp_router', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = ['bgp_router']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('bgp-router', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'bgp_router', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['bgp_router', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('bgp-router')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'bgp_router', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('bgp-router', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'bgp_router', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('bgp-router', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_bgp_router_create', obj_dict)
- except Exception as e:
- pass
-
- return {'bgp-router': rsp_body}
- #end bgp_routers_http_post
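The create path allocates and stores the id mapping (dbe_alloc) before the object is published, precisely so that a subscriber can never ask for a mapping that has not been stored yet, and only then runs the type-specific hook and dbe_create. From a client it is a single POST to the collection route; the sketch below assumes the collection URL is /bgp-routers, uses purely illustrative field values, and omits authentication:

# Sketch only: creating a bgp-router through the collection handler above.
import requests

BASE = 'http://api-server:8082'                     # placeholder address
body = {
    'bgp-router': {
        'fq_name': ['default-domain', 'default-project',
                    'ip-fabric', '__default__', 'bgp1'],      # illustrative
        'parent_type': 'routing-instance',                    # illustrative
        'bgp_router_parameters': {'vendor': 'contrail',
                                  'autonomous_system': 64512} # illustrative
    }
}
resp = requests.post('%s/bgp-routers' % BASE, json=body)
created = resp.json()['bgp-router']
print(created['uuid'], created['href'])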
-
- def bgp_routers_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'bgp_routers', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('bgp-router', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'bgp_routers', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'bgp-routers': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('bgp-router', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('bgp-router', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('bgp-router', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'bgp_router_parameters', u'id_perms', u'display_name'] + ['bgp_router_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('bgp-router', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('bgp-router', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'bgp-router': obj_dict})
-
- return {'bgp-routers': obj_dicts}
- #end bgp_routers_http_get
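The collection GET recognises several query options: parent_id (or parent_fq_name_str plus parent_type), back_ref_id and obj_uuids to anchor the search, count for a bare total, detail for full property-plus-reference dicts, fields for extra columns, and filters as comma-separated name==value pairs split into field_names/field_values. A client-side sketch of a detailed, filtered listing (requests library assumed; address and values are placeholders, authentication omitted):

# Sketch only: listing bgp-routers with the query options handled above.
import requests

BASE = 'http://api-server:8082'                     # placeholder address
params = {
    'detail': 'true',                   # each entry becomes {'bgp-router': {...}}
    'fields': 'bgp_router_parameters',  # extra fields on top of the defaults
    'filters': 'display_name==bgp1',    # parsed into field_names/field_values
}
resp = requests.get('%s/bgp-routers' % BASE, params=params)
for item in resp.json()['bgp-routers']:
    print(item['bgp-router']['uuid'])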
-
- def _bgp_router_create_default_children(self, parent_obj):
- pass
- #end _bgp_router_create_default_children
-
- def _bgp_router_delete_default_children(self, parent_dict):
- pass
- #end _bgp_router_delete_default_children
-
- def virtual_router_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_router_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'virtual_router':
- abort(404, 'No virtual-router object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'virtual_router', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-router')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'virtual_router', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'virtual_router_type', u'virtual_router_ip_address', u'id_perms', u'display_name']
- references = ['bgp_router_refs', u'virtual_machine_refs']
- back_references = [u'physical_router_back_refs', u'global_system_config_back_refs', 'provider_attachment_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('virtual-router', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'virtual_router', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'virtual_router', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('virtual-router', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_router_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'virtual-router': rsp_body}
- #end virtual_router_http_get
-
- def virtual_router_http_put(self, id):
- key = 'virtual-router'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_router_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'virtual_router':
- abort(404, 'No virtual-router object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'virtual_router', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'virtual_router', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = ['bgp_router', u'virtual_machine']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('virtual-router')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'virtual_router', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('virtual-router', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'virtual_router', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('virtual-router', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_router_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'virtual-router': rsp_body}
- #end virtual_router_http_put
-
- def virtual_router_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'virtual_router':
- abort(404, 'No virtual-router object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_router_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'physical_router_back_refs', u'global_system_config_back_refs', 'provider_attachment_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('virtual-router', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'virtual_router', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'virtual_router', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'virtual_router', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('virtual-router', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-router')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- physical_router_back_refs = read_result.get('physical_router_back_refs', None)
- if physical_router_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['physical_router_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_router', 'http_delete', err_msg)
- abort(409, err_msg)
-
- provider_attachment_back_refs = read_result.get('provider_attachment_back_refs', None)
- if provider_attachment_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['provider_attachment_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_router', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._virtual_router_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'virtual_router', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('virtual-router', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'virtual_router', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_router_delete', id, read_result)
- except Exception as e:
- pass
-
- #end virtual_router_http_delete
-
- def virtual_routers_http_post(self):
- key = 'virtual-router'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_router_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'virtual-router', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'virtual_router', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_router', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = ['bgp_router', u'virtual_machine']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('virtual-router', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'virtual_router', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['virtual_router', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-router')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_router', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('virtual-router', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_router', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('virtual-router', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_router_create', obj_dict)
- except Exception as e:
- pass
-
- return {'virtual-router': rsp_body}
- #end virtual_routers_http_post
-
- def virtual_routers_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'virtual_routers', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('virtual-router', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'virtual_routers', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'virtual-routers': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('virtual-router', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('virtual-router', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('virtual-router', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'virtual_router_type', u'virtual_router_ip_address', u'id_perms', u'display_name'] + ['bgp_router_refs', u'virtual_machine_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('virtual-router', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('virtual-router', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'virtual-router': obj_dict})
-
- return {'virtual-routers': obj_dicts}
- #end virtual_routers_http_get
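The gevent.sleep(0) calls sprinkled through every result loop above are cooperative yields: the comments indicate the loop pauses so that background work such as the ZooKeeper heartbeat/ping can run instead of being starved by a long, never-yielding loop. A stripped-down sketch of the same pattern (names are illustrative; gevent assumed installed):

# Sketch only: yielding inside a long result loop so other greenlets can run.
import gevent

def build_list_entries(results):
    out = []
    for obj_result in results:
        gevent.sleep(0)      # yield to let heartbeat/ping greenlets run
        out.append({'uuid': obj_result['uuid']})
    return out

print(build_list_entries([{'uuid': 'u1'}, {'uuid': 'u2'}]))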
-
- def _virtual_router_create_default_children(self, parent_obj):
- pass
- #end _virtual_router_create_default_children
-
- def _virtual_router_delete_default_children(self, parent_dict):
- pass
- #end _virtual_router_delete_default_children
-
- def config_root_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_config_root_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'config_root':
- abort(404, 'No config-root object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'config_root', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('config-root')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'config_root', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'id_perms', u'display_name']
- references = []
- back_references = []
- children = [u'global_system_configs', u'domains']
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('config-root', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'config_root', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'config_root', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('config-root', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_config_root_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'config-root': rsp_body}
- #end config_root_http_get
-
- def config_root_http_put(self, id):
- key = 'config-root'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_config_root_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'config_root':
- abort(404, 'No config-root object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'config_root', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'config_root', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = [u'global_system_config', u'domain']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('config-root')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'config_root', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('config-root', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'config_root', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('config-root', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_config_root_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'config-root': rsp_body}
- #end config_root_http_put
-
- def config_root_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'config_root':
- abort(404, 'No config-root object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_config_root_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = []
- children = [u'global_system_configs', u'domains']
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('config-root', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'config_root', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'config_root', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'config_root', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('config-root', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('config-root')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- global_system_configs = read_result.get('global_system_configs', None)
- if global_system_configs:
- has_infos = read_result['global_system_configs']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-global-system-config')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'config_root', 'http_delete', err_msg)
- abort(409, err_msg)
-
- domains = read_result.get('domains', None)
- if domains:
- has_infos = read_result['domains']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-domain')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'config_root', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._config_root_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'config_root', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('config-root', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'config_root', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_config_root_delete', id, read_result)
- except Exception as e:
- pass
-
- #end config_root_http_delete
-
- def config_roots_http_post(self):
- key = 'config-root'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_config_root_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'config-root', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'config_root', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # Validate perms
- objtype_list = [u'global_system_config', u'domain']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('config-root', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'config_root', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['config_root', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('config-root')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'config_root', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('config-root', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'config_root', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('config-root', obj_ids['uuid'])
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_config_root_create', obj_dict)
- except Exception as e:
- pass
-
- return {'config-root': rsp_body}
- #end config_roots_http_post
-
- def config_roots_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'config_roots', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('config-root', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'config_roots', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'config-roots': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('config-root', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('config-root', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('config-root', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('config-root', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('config-root', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'config-root': obj_dict})
-
- return {'config-roots': obj_dicts}
- #end config_roots_http_get
-
- def _config_root_create_default_children(self, parent_obj):
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('global-system-config')
- if r_class and r_class.generate_default_instance:
- child_obj = GlobalSystemConfig(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('global-system-config')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('global-system-config', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('global-system-config', obj_ids, child_dict)
- self._global_system_config_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('domain')
- if r_class and r_class.generate_default_instance:
- child_obj = Domain(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('domain')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('domain', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('domain', obj_ids, child_dict)
- self._domain_create_default_children(child_obj)
-
- pass
- #end _config_root_create_default_children
-
- def _config_root_delete_default_children(self, parent_dict):
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('global-system-config')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('global_system_configs')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-global-system-config':
- default_child_id = has_info['href'].split('/')[-1]
- self.global_system_config_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('domain')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('domains')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-domain':
- default_child_id = has_info['href'].split('/')[-1]
- self.domain_http_delete(default_child_id)
- break
-
- pass
- #end _config_root_delete_default_children
-
- def subnet_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_subnet_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'subnet':
- abort(404, 'No subnet object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'subnet', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('subnet')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
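- # Conditional GET: if the client sent If-None-Match and the stored object is still at that etag (its id_perms.last_modified), answer 304 Not-Modified without reading the full object.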
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'subnet', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'subnet_ip_prefix', u'id_perms', u'display_name']
- references = ['virtual_machine_interface_refs']
- back_references = []
- children = []
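- # A 'fields' query parameter overrides the default selection; otherwise back-references and children can be trimmed with the exclude_back_refs / exclude_children query parameters.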
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('subnet', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'subnet', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible to users: %s' % id
- self.config_object_error(id, None, 'subnet', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('subnet', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_subnet_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'subnet': rsp_body}
- #end subnet_http_get
-
- def subnet_http_put(self, id):
- key = 'subnet'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_subnet_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'subnet':
- abort(404, 'No subnet object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
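- # Validate each supplied complex property by round-tripping it through its generated type: construct from the dict, export to XML, re-parse and build(); any failure aborts with 400.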
- prop_dict = obj_dict.get('subnet_ip_prefix')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_subnet_ip_prefix = SubnetType(**prop_dict)
- xx_subnet_ip_prefix.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_subnet_ip_prefix = SubnetType()
- try:
- xx_subnet_ip_prefix.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'subnet', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'subnet', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = ['virtual_machine_interface']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
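- # cleanup_on_failure collects (callable, args) pairs after each successful step; if a later step fails they are invoked in order to roll back the earlier state changes.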
- # type-specific hook
- r_class = self.get_resource_class('subnet')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'subnet', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('subnet', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'subnet', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('subnet', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_subnet_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'subnet': rsp_body}
- #end subnet_http_put
-
- def subnet_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'subnet':
- abort(404, 'No subnet object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_subnet_delete', id)
- except Exception as e:
- pass
-
- # read the object from the db (tolerating read errors) to get its details
- obj_ids = {'uuid': id}
- back_references = []
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('subnet', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'subnet', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'subnet', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'subnet', 'http_delete', msg)
- abort(code, msg)
-
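- # Derive IF-MAP identifiers for the object (and its parent, if any) from the fq_name; these are passed to dbe_delete, presumably so the corresponding IF-MAP entries are removed as well.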
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('subnet', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('subnet')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
-
- # Delete default children first
- self._subnet_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'subnet', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('subnet', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'subnet', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_subnet_delete', id, read_result)
- except Exception as e:
- pass
-
- #end subnet_http_delete
-
- def subnets_http_post(self):
- key = 'subnet'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_subnet_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('subnet_ip_prefix')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_subnet_ip_prefix = SubnetType(**prop_dict)
- xx_subnet_ip_prefix.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_subnet_ip_prefix = SubnetType()
- try:
- xx_subnet_ip_prefix.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'subnet', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'subnet', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # Validate perms
- objtype_list = ['virtual_machine_interface']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('subnet', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'subnet', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['subnet', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
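- # The tenant is read from the header named by hdr_server_tenant(), defaulting to 'default-project'; it is handed to the type-specific http_post_collection hook below.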
-
-
- # type-specific hook
- r_class = self.get_resource_class('subnet')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'subnet', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('subnet', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'subnet', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('subnet', obj_ids['uuid'])
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_subnet_create', obj_dict)
- except Exception as e:
- pass
-
- return {'subnet': rsp_body}
- #end subnets_http_post
-
- def subnets_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'subnets', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
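- # Filters arrive as a comma-separated list of field==value pairs (e.g. filters=field1==value1,field2==value2); anything that does not parse aborts with 400.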
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('subnet', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'subnets', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'subnets': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('subnet', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('subnet', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('subnet', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
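- # Detail listing reads the default properties and references (plus any requested 'fields') for every uuid and wraps each visible object as {'subnet': {...}}.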
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'subnet_ip_prefix', u'id_perms', u'display_name'] + ['virtual_machine_interface_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('subnet', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('subnet', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'subnet': obj_dict})
-
- return {'subnets': obj_dicts}
- #end subnets_http_get
-
- def _subnet_create_default_children(self, parent_obj):
- pass
- #end _subnet_create_default_children
-
- def _subnet_delete_default_children(self, parent_dict):
- pass
- #end _subnet_delete_default_children
-
- def global_system_config_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_global_system_config_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'global_system_config':
- abort(404, 'No global-system-config object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'global_system_config', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('global-system-config')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'global_system_config', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'autonomous_system', u'config_version', u'plugin_tuning', u'ibgp_auto_mesh', u'ip_fabric_subnets', u'id_perms', u'display_name']
- references = ['bgp_router_refs']
- back_references = [u'config_root_back_refs']
- children = [u'global_vrouter_configs', u'physical_routers', 'virtual_routers', u'config_nodes', u'analytics_nodes', u'database_nodes', u'service_appliance_sets']
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('global-system-config', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'global_system_config', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible to users: %s' % id
- self.config_object_error(id, None, 'global_system_config', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('global-system-config', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_global_system_config_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'global-system-config': rsp_body}
- #end global_system_config_http_get
-
- def global_system_config_http_put(self, id):
- key = 'global-system-config'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_global_system_config_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'global_system_config':
- abort(404, 'No global-system-config object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('plugin_tuning')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_plugin_tuning = PluginProperties(**prop_dict)
- xx_plugin_tuning.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_plugin_tuning = PluginProperties()
- try:
- xx_plugin_tuning.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('ip_fabric_subnets')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_ip_fabric_subnets = SubnetListType(**prop_dict)
- xx_ip_fabric_subnets.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_ip_fabric_subnets = SubnetListType()
- try:
- xx_ip_fabric_subnets.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'global_system_config', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'global_system_config', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = ['bgp_router', u'global_vrouter_config', u'physical_router', 'virtual_router', u'config_node', u'analytics_node', u'database_node', u'service_appliance_set']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('global-system-config')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'global_system_config', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('global-system-config', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'global_system_config', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('global-system-config', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_global_system_config_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'global-system-config': rsp_body}
- #end global_system_config_http_put
-
- def global_system_config_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'global_system_config':
- abort(404, 'No global-system-config object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_global_system_config_delete', id)
- except Exception as e:
- pass
-
- # read the object from the db (tolerating read errors) to get its details
- obj_ids = {'uuid': id}
- back_references = [u'config_root_back_refs']
- children = [u'global_vrouter_configs', u'physical_routers', 'virtual_routers', u'config_nodes', u'analytics_nodes', u'database_nodes', u'service_appliance_sets']
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('global-system-config', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'global_system_config', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'global_system_config', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'global_system_config', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('global-system-config', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('global-system-config')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
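- # Each child collection checked below may contain at most the generated default instance; any other remaining child aborts the delete with 409 and lists the offending hrefs.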
- global_vrouter_configs = read_result.get('global_vrouter_configs', None)
- if global_vrouter_configs:
- has_infos = read_result['global_vrouter_configs']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-global-vrouter-config')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'global_system_config', 'http_delete', err_msg)
- abort(409, err_msg)
-
- physical_routers = read_result.get('physical_routers', None)
- if physical_routers:
- has_infos = read_result['physical_routers']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-physical-router')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'global_system_config', 'http_delete', err_msg)
- abort(409, err_msg)
-
- virtual_routers = read_result.get('virtual_routers', None)
- if virtual_routers:
- has_infos = read_result['virtual_routers']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-virtual-router')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'global_system_config', 'http_delete', err_msg)
- abort(409, err_msg)
-
- config_nodes = read_result.get('config_nodes', None)
- if config_nodes:
- has_infos = read_result['config_nodes']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-config-node')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'global_system_config', 'http_delete', err_msg)
- abort(409, err_msg)
-
- analytics_nodes = read_result.get('analytics_nodes', None)
- if analytics_nodes:
- has_infos = read_result['analytics_nodes']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-analytics-node')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'global_system_config', 'http_delete', err_msg)
- abort(409, err_msg)
-
- database_nodes = read_result.get('database_nodes', None)
- if database_nodes:
- has_infos = read_result['database_nodes']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-database-node')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'global_system_config', 'http_delete', err_msg)
- abort(409, err_msg)
-
- service_appliance_sets = read_result.get('service_appliance_sets', None)
- if service_appliance_sets:
- has_infos = read_result['service_appliance_sets']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-service-appliance-set')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'global_system_config', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._global_system_config_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'global_system_config', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('global-system-config', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'global_system_config', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_global_system_config_delete', id, read_result)
- except Exception as e:
- pass
-
- #end global_system_config_http_delete
-
- def global_system_configs_http_post(self):
- key = 'global-system-config'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_global_system_config_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('plugin_tuning')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_plugin_tuning = PluginProperties(**prop_dict)
- xx_plugin_tuning.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_plugin_tuning = PluginProperties()
- try:
- xx_plugin_tuning.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('ip_fabric_subnets')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_ip_fabric_subnets = SubnetListType(**prop_dict)
- xx_ip_fabric_subnets.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_ip_fabric_subnets = SubnetListType()
- try:
- xx_ip_fabric_subnets.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'global-system-config', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'global_system_config', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
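- # A remaining parent_type means a non config-root parent: it must resolve to an existing uuid and the caller needs write permission on it before the user role is stamped onto the object.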
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'global_system_config', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = ['bgp_router', u'global_vrouter_config', u'physical_router', 'virtual_router', u'config_node', u'analytics_node', u'database_node', u'service_appliance_set']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('global-system-config', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'global_system_config', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['global_system_config', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('global-system-config')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'global_system_config', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('global-system-config', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'global_system_config', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('global-system-config', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_global_system_config_create', obj_dict)
- except Exception as e:
- pass
-
- return {'global-system-config': rsp_body}
- #end global_system_configs_http_post
-
- def global_system_configs_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'global_system_configs', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('global-system-config', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'global_system_configs', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'global-system-configs': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('global-system-config', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('global-system-config', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('global-system-config', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'autonomous_system', u'config_version', u'plugin_tuning', u'ibgp_auto_mesh', u'ip_fabric_subnets', u'id_perms', u'display_name'] + ['bgp_router_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('global-system-config', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('global-system-config', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'global-system-config': obj_dict})
-
- return {'global-system-configs': obj_dicts}
- #end global_system_configs_http_get
-
- def _global_system_config_create_default_children(self, parent_obj):
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('global-vrouter-config')
- if r_class and r_class.generate_default_instance:
- child_obj = GlobalVrouterConfig(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('global-vrouter-config')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('global-vrouter-config', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('global-vrouter-config', obj_ids, child_dict)
- self._global_vrouter_config_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('physical-router')
- if r_class and r_class.generate_default_instance:
- child_obj = PhysicalRouter(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('physical-router')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('physical-router', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('physical-router', obj_ids, child_dict)
- self._physical_router_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('virtual-router')
- if r_class and r_class.generate_default_instance:
- child_obj = VirtualRouter(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('virtual-router')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('virtual-router', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('virtual-router', obj_ids, child_dict)
- self._virtual_router_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('config-node')
- if r_class and r_class.generate_default_instance:
- child_obj = ConfigNode(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('config-node')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('config-node', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('config-node', obj_ids, child_dict)
- self._config_node_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('analytics-node')
- if r_class and r_class.generate_default_instance:
- child_obj = AnalyticsNode(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('analytics-node')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('analytics-node', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('analytics-node', obj_ids, child_dict)
- self._analytics_node_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('database-node')
- if r_class and r_class.generate_default_instance:
- child_obj = DatabaseNode(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('database-node')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('database-node', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('database-node', obj_ids, child_dict)
- self._database_node_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('service-appliance-set')
- if r_class and r_class.generate_default_instance:
- child_obj = ServiceApplianceSet(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('service-appliance-set')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('service-appliance-set', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('service-appliance-set', obj_ids, child_dict)
- self._service_appliance_set_create_default_children(child_obj)
-
- pass
- #end _global_system_config_create_default_children
-
- def _global_system_config_delete_default_children(self, parent_dict):
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('global-vrouter-config')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('global_vrouter_configs')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-global-vrouter-config':
- default_child_id = has_info['href'].split('/')[-1]
- self.global_vrouter_config_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('physical-router')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('physical_routers')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-physical-router':
- default_child_id = has_info['href'].split('/')[-1]
- self.physical_router_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('virtual-router')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('virtual_routers')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-virtual-router':
- default_child_id = has_info['href'].split('/')[-1]
- self.virtual_router_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('config-node')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('config_nodes')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-config-node':
- default_child_id = has_info['href'].split('/')[-1]
- self.config_node_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('analytics-node')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('analytics_nodes')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-analytics-node':
- default_child_id = has_info['href'].split('/')[-1]
- self.analytics_node_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('database-node')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('database_nodes')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-database-node':
- default_child_id = has_info['href'].split('/')[-1]
- self.database_node_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('service-appliance-set')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('service_appliance_sets')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-service-appliance-set':
- default_child_id = has_info['href'].split('/')[-1]
- self.service_appliance_set_http_delete(default_child_id)
- break
-
- pass
- #end _global_system_config_delete_default_children
-
- def service_appliance_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_service_appliance_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'service_appliance':
- abort(404, 'No service-appliance object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'service_appliance', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('service-appliance')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'service_appliance', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'service_appliance_user_credentials', u'service_appliance_ip_address', u'service_appliance_properties', u'id_perms', u'display_name']
- references = []
- back_references = [u'service_appliance_set_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('service-appliance', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'service_appliance', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible to users: %s' % id
- self.config_object_error(id, None, 'service_appliance', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('service-appliance', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_service_appliance_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'service-appliance': rsp_body}
- #end service_appliance_http_get
-
- def service_appliance_http_put(self, id):
- key = 'service-appliance'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_service_appliance_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'service_appliance':
- abort(404, 'No service-appliance object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('service_appliance_user_credentials')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_service_appliance_user_credentials = UserCredentials(**prop_dict)
- xx_service_appliance_user_credentials.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_service_appliance_user_credentials = UserCredentials()
- try:
- xx_service_appliance_user_credentials.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('service_appliance_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_service_appliance_properties = KeyValuePairs(**prop_dict)
- xx_service_appliance_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_service_appliance_properties = KeyValuePairs()
- try:
- xx_service_appliance_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'service_appliance', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'service_appliance', 'http_put', msg)
- abort(code, msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('service-appliance')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'service_appliance', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('service-appliance', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'service_appliance', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('service-appliance', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_service_appliance_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'service-appliance': rsp_body}
- #end service_appliance_http_put
-
- def service_appliance_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'service_appliance':
- abort(404, 'No service-appliance object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_service_appliance_delete', id)
- except Exception as e:
- pass
-
- # read the object from the db (tolerating read errors) to get its details
- obj_ids = {'uuid': id}
- back_references = [u'service_appliance_set_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('service-appliance', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'service_appliance', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'service_appliance', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'service_appliance', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('service-appliance', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('service-appliance')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
-
- # Delete default children first
- self._service_appliance_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'service_appliance', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('service-appliance', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'service_appliance', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_service_appliance_delete', id, read_result)
- except Exception as e:
- pass
-
- #end service_appliance_http_delete
-
- def service_appliances_http_post(self):
- key = 'service-appliance'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_service_appliance_create', obj_dict)
- except Exception as e:
- pass
-
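-        # sanity-check each supplied property block by round-tripping it through its generated type: construct from the request dict, export to XML, re-parse and rebuild; any failure aborts with 400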
- prop_dict = obj_dict.get('service_appliance_user_credentials')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_service_appliance_user_credentials = UserCredentials(**prop_dict)
- xx_service_appliance_user_credentials.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_service_appliance_user_credentials = UserCredentials()
- try:
- xx_service_appliance_user_credentials.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('service_appliance_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_service_appliance_properties = KeyValuePairs(**prop_dict)
- xx_service_appliance_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_service_appliance_properties = KeyValuePairs()
- try:
- xx_service_appliance_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'service-appliance', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'service_appliance', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'service_appliance', 'http_post', err_msg)
- abort(400, err_msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('service-appliance', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'service_appliance', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['service_appliance', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
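-        # resource-class pre-create hook: an exception or a False result is treated as failure (500 for exceptions); the cleanups registered so far are run and the request is aborted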
- r_class = self.get_resource_class('service-appliance')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'service_appliance', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('service-appliance', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'service_appliance', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('service-appliance', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_service_appliance_create', obj_dict)
- except Exception as e:
- pass
-
- return {'service-appliance': rsp_body}
- #end service_appliances_http_post
-
- def service_appliances_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'service_appliances', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
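-        # 'filters' is a comma-separated list of field==value pairs; split it into parallel name/value lists for dbe_list, rejecting malformed input with 400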
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('service-appliance', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'service_appliances', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'service-appliances': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
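-        # without 'detail' return only uuid/href/fq_name per object (hiding user_visible=False entries from non-admins); with 'detail' read the full property/reference fields and include each object visible to the caller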
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('service-appliance', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('service-appliance', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('service-appliance', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'service_appliance_user_credentials', u'service_appliance_ip_address', u'service_appliance_properties', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('service-appliance', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('service-appliance', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'service-appliance': obj_dict})
-
- return {'service-appliances': obj_dicts}
- #end service_appliances_http_get
-
- def _service_appliance_create_default_children(self, parent_obj):
- pass
- #end _service_appliance_create_default_children
-
- def _service_appliance_delete_default_children(self, parent_dict):
- pass
- #end _service_appliance_delete_default_children
-
- def service_instance_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_service_instance_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'service_instance':
- abort(404, 'No service-instance object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'service_instance', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('service-instance')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
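-        # conditional GET: when the client supplies If-None-Match and the stored object is still at that ETag, reply 304 and skip the full read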
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'service_instance', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
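-        # default reads return properties plus references; back-references and children are added unless the client passes exclude_back_refs / exclude_children, and an explicit 'fields' query replaces the list entirely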
- properties = [u'service_instance_properties', u'id_perms', u'display_name']
- references = ['service_template_refs']
- back_references = [u'project_back_refs', u'virtual_machine_back_refs', u'logical_router_back_refs', u'loadbalancer_pool_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('service-instance', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'service_instance', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'service_instance', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('service-instance', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_service_instance_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'service-instance': rsp_body}
- #end service_instance_http_get
-
- def service_instance_http_put(self, id):
- key = 'service-instance'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_service_instance_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'service_instance':
- abort(404, 'No service-instance object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('service_instance_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_service_instance_properties = ServiceInstanceType(**prop_dict)
- xx_service_instance_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_service_instance_properties = ServiceInstanceType()
- try:
- xx_service_instance_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'service_instance', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'service_instance', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
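-        # each referenced service-template must resolve to an existing uuid and pass the link-permission check; failures abort before any state is modified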
- objtype_list = ['service_template']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('service-instance')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'service_instance', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('service-instance', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'service_instance', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('service-instance', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_service_instance_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'service-instance': rsp_body}
- #end service_instance_http_put
-
- def service_instance_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'service_instance':
- abort(404, 'No service-instance object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_service_instance_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'project_back_refs', u'virtual_machine_back_refs', u'logical_router_back_refs', u'loadbalancer_pool_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('service-instance', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'service_instance', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'service_instance', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'service_instance', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('service-instance', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('service-instance')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
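-                # a service-instance still referenced by a logical-router or loadbalancer-pool cannot be deleted; report the referring hrefs and return 409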
- logical_router_back_refs = read_result.get('logical_router_back_refs', None)
- if logical_router_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['logical_router_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'service_instance', 'http_delete', err_msg)
- abort(409, err_msg)
-
- loadbalancer_pool_back_refs = read_result.get('loadbalancer_pool_back_refs', None)
- if loadbalancer_pool_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['loadbalancer_pool_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'service_instance', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._service_instance_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'service_instance', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('service-instance', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'service_instance', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_service_instance_delete', id, read_result)
- except Exception as e:
- pass
-
- #end service_instance_http_delete
-
- def service_instances_http_post(self):
- key = 'service-instance'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_service_instance_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('service_instance_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_service_instance_properties = ServiceInstanceType(**prop_dict)
- xx_service_instance_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_service_instance_properties = ServiceInstanceType()
- try:
- xx_service_instance_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'service-instance', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'service_instance', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'service_instance', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = ['service_template']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('service-instance', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'service_instance', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['service_instance', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('service-instance')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'service_instance', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('service-instance', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'service_instance', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('service-instance', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_service_instance_create', obj_dict)
- except Exception as e:
- pass
-
- return {'service-instance': rsp_body}
- #end service_instances_http_post
-
- def service_instances_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'service_instances', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('service-instance', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'service_instances', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'service-instances': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('service-instance', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('service-instance', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('service-instance', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'service_instance_properties', u'id_perms', u'display_name'] + ['service_template_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('service-instance', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('service-instance', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'service-instance': obj_dict})
-
- return {'service-instances': obj_dicts}
- #end service_instances_http_get
-
- def _service_instance_create_default_children(self, parent_obj):
- pass
- #end _service_instance_create_default_children
-
- def _service_instance_delete_default_children(self, parent_dict):
- pass
- #end _service_instance_delete_default_children
-
- def namespace_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_namespace_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'namespace':
- abort(404, 'No namespace object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'namespace', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('namespace')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'namespace', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'namespace_cidr', u'id_perms', u'display_name']
- references = []
- back_references = [u'domain_back_refs', u'project_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('namespace', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'namespace', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'namespace', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('namespace', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_namespace_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'namespace': rsp_body}
- #end namespace_http_get
-
- def namespace_http_put(self, id):
- key = 'namespace'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_namespace_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'namespace':
- abort(404, 'No namespace object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('namespace_cidr')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_namespace_cidr = SubnetType(**prop_dict)
- xx_namespace_cidr.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_namespace_cidr = SubnetType()
- try:
- xx_namespace_cidr.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'namespace', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'namespace', 'http_put', msg)
- abort(code, msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('namespace')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'namespace', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('namespace', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'namespace', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('namespace', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_namespace_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'namespace': rsp_body}
- #end namespace_http_put
-
- def namespace_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'namespace':
- abort(404, 'No namespace object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_namespace_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'domain_back_refs', u'project_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('namespace', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'namespace', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'namespace', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'namespace', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('namespace', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('namespace')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- project_back_refs = read_result.get('project_back_refs', None)
- if project_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['project_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'namespace', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._namespace_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'namespace', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('namespace', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'namespace', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_namespace_delete', id, read_result)
- except Exception as e:
- pass
-
- #end namespace_http_delete
-
- def namespaces_http_post(self):
- key = 'namespace'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_namespace_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('namespace_cidr')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_namespace_cidr = SubnetType(**prop_dict)
- xx_namespace_cidr.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_namespace_cidr = SubnetType()
- try:
- xx_namespace_cidr.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'namespace', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'namespace', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'namespace', 'http_post', err_msg)
- abort(400, err_msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('namespace', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'namespace', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['namespace', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('namespace')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'namespace', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('namespace', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'namespace', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('namespace', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_namespace_create', obj_dict)
- except Exception as e:
- pass
-
- return {'namespace': rsp_body}
- #end namespaces_http_post
-
- def namespaces_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'namespaces', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('namespace', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'namespaces', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'namespaces': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('namespace', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('namespace', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('namespace', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'namespace_cidr', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('namespace', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('namespace', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'namespace': obj_dict})
-
- return {'namespaces': obj_dicts}
- #end namespaces_http_get
-
- def _namespace_create_default_children(self, parent_obj):
- pass
- #end _namespace_create_default_children
-
- def _namespace_delete_default_children(self, parent_dict):
- pass
- #end _namespace_delete_default_children
-
- def logical_interface_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_logical_interface_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'logical_interface':
- abort(404, 'No logical-interface object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'logical_interface', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('logical-interface')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'logical_interface', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'logical_interface_vlan_tag', u'logical_interface_type', u'id_perms', u'display_name']
- references = ['virtual_machine_interface_refs']
- back_references = [u'physical_router_back_refs', u'physical_interface_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('logical-interface', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'logical_interface', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'logical_interface', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('logical-interface', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_logical_interface_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'logical-interface': rsp_body}
- #end logical_interface_http_get
-
- def logical_interface_http_put(self, id):
- key = 'logical-interface'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_logical_interface_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'logical_interface':
- abort(404, 'No logical-interface object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'logical_interface', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'logical_interface', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = ['virtual_machine_interface']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('logical-interface')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'logical_interface', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('logical-interface', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'logical_interface', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('logical-interface', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_logical_interface_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'logical-interface': rsp_body}
- #end logical_interface_http_put
-
- def logical_interface_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'logical_interface':
- abort(404, 'No logical-interface object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_logical_interface_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'physical_router_back_refs', u'physical_interface_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('logical-interface', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'logical_interface', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'logical_interface', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'logical_interface', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('logical-interface', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('logical-interface')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
-
- # Delete default children first
- self._logical_interface_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'logical_interface', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('logical-interface', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'logical_interface', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_logical_interface_delete', id, read_result)
- except Exception as e:
- pass
-
- #end logical_interface_http_delete
-
- def logical_interfaces_http_post(self):
- key = 'logical-interface'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_logical_interface_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'logical-interface', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'logical_interface', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'logical_interface', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = ['virtual_machine_interface']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('logical-interface', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'logical_interface', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['logical_interface', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('logical-interface')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'logical_interface', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('logical-interface', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'logical_interface', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('logical-interface', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_logical_interface_create', obj_dict)
- except Exception as e:
- pass
-
- return {'logical-interface': rsp_body}
- #end logical_interfaces_http_post
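
The create path above allocates ids, runs the type-specific post hook, and only then writes to the backend, replaying every recorded undo action if a later step fails. Below is a minimal standalone sketch of that compensating-action pattern; the names (create_with_rollback, the fake dbe_alloc/dbe_release steps) are illustrative and not part of the removed module.

# Sketch: accumulate undo callables as state changes succeed, replay them on failure.
def create_with_rollback(steps):
    """steps: list of (do, undo) pairs; undo may be None for read-only steps."""
    cleanup_on_failure = []
    try:
        for do, undo in steps:
            do()
            if undo is not None:
                cleanup_on_failure.append(undo)
    except Exception:
        for fail_cleanup_callable in cleanup_on_failure:
            fail_cleanup_callable()
        raise

if __name__ == '__main__':
    log = []

    def failing_db_create():
        raise RuntimeError('dbe_create failed')

    try:
        create_with_rollback([
            (lambda: log.append('dbe_alloc'), lambda: log.append('dbe_release')),
            (failing_db_create, None),
        ])
    except RuntimeError:
        pass
    print(log)   # ['dbe_alloc', 'dbe_release']
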
-
- def logical_interfaces_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'logical_interfaces', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('logical-interface', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'logical_interfaces', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'logical-interfaces': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('logical-interface', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('logical-interface', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('logical-interface', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'logical_interface_vlan_tag', u'logical_interface_type', u'id_perms', u'display_name'] + ['virtual_machine_interface_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('logical-interface', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('logical-interface', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'logical-interface': obj_dict})
-
- return {'logical-interfaces': obj_dicts}
- #end logical_interfaces_http_get
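
The collection GET above is driven entirely by query parameters. A hypothetical client-side call is sketched below to show the parameters the handler parses; the server address and the /logical-interfaces route are assumptions, not taken from this change.

import uuid
import requests

API_SERVER = 'http://127.0.0.1:8082'          # assumed api-server address
params = {
    'parent_id': str(uuid.uuid4()),           # anchor the listing by parent uuid(s)
    'detail': 'true',                         # return props + refs, not just fq_name/uuid
    'fields': 'logical_interface_vlan_tag',   # extra per-object fields to include
    'filters': 'logical_interface_type==l2',  # comma-separated name==value pairs
}
req = requests.Request('GET', API_SERVER + '/logical-interfaces', params=params).prepare()
print(req.url)                                # the query string the handler above parses
# requests.Session().send(req)                # only meaningful against a live api-server
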
-
- def _logical_interface_create_default_children(self, parent_obj):
- pass
- #end _logical_interface_create_default_children
-
- def _logical_interface_delete_default_children(self, parent_dict):
- pass
- #end _logical_interface_delete_default_children
-
- def route_table_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_route_table_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'route_table':
- abort(404, 'No route-table object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'route_table', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('route-table')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'route_table', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'routes', u'id_perms', u'display_name']
- references = []
- back_references = [u'project_back_refs', u'virtual_network_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('route-table', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'route_table', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'route_table', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('route-table', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_route_table_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'route-table': rsp_body}
- #end route_table_http_get
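
The read handler above honours If-None-Match and answers 304 once dbe_is_latest() confirms the caller's copy is current. A client-side sketch of that conditional read follows; the helper name cached_read is illustrative and a reachable api-server is assumed.

import requests

def cached_read(session, url, cache):
    """cache maps url -> (etag, body); replays If-None-Match on re-reads."""
    headers = {}
    if url in cache:
        headers['If-None-Match'] = cache[url][0]
    resp = session.get(url, headers=headers)
    if resp.status_code == 304:               # server reused dbe_is_latest() result
        return cache[url][1]
    cache[url] = (resp.headers.get('ETag', ''), resp.json())
    return cache[url][1]

# cache = {}
# body = cached_read(requests.Session(), 'http://127.0.0.1:8082/route-table/<uuid>', cache)
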
-
- def route_table_http_put(self, id):
- key = 'route-table'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_route_table_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'route_table':
- abort(404, 'No route-table object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('routes')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_routes = RouteTableType(**prop_dict)
- xx_routes.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_routes = RouteTableType()
- try:
- xx_routes.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'route_table', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'route_table', 'http_put', msg)
- abort(code, msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('route-table')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'route_table', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('route-table', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'route_table', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('route-table', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_route_table_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'route-table': rsp_body}
- #end route_table_http_put
-
- def route_table_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'route_table':
- abort(404, 'No route-table object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_route_table_delete', id)
- except Exception as e:
- pass
-
- # read the object from the db (tolerating read errors) so its details are available to the delete
- obj_ids = {'uuid': id}
- back_references = [u'project_back_refs', u'virtual_network_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('route-table', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'route_table', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'route_table', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'route_table', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('route-table', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('route-table')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- virtual_network_back_refs = read_result.get('virtual_network_back_refs', None)
- if virtual_network_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_network_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'route_table', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._route_table_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'route_table', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('route-table', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'route_table', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_route_table_delete', id, read_result)
- except Exception as e:
- pass
-
- #end route_table_http_delete
-
- def route_tables_http_post(self):
- key = 'route-table'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_route_table_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('routes')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_routes = RouteTableType(**prop_dict)
- xx_routes.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_routes = RouteTableType()
- try:
- xx_routes.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'route-table', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'route_table', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'route_table', 'http_post', err_msg)
- abort(400, err_msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('route-table', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'route_table', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['route_table', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('route-table')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'route_table', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('route-table', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'route_table', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('route-table', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_route_table_create', obj_dict)
- except Exception as e:
- pass
-
- return {'route-table': rsp_body}
- #end route_tables_http_post
-
- def route_tables_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'route_tables', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('route-table', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'route_tables', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'route-tables': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('route-table', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('route-table', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('route-table', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'routes', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('route-table', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('route-table', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'route-table': obj_dict})
-
- return {'route-tables': obj_dicts}
- #end route_tables_http_get
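
Both list branches above apply the same visibility rule before an object is returned. Restated as a small helper (the function name is illustrative):

def visible_in_listing(obj_result, is_admin):
    id_perms = obj_result.get('id_perms', {})
    return is_admin or id_perms.get('user_visible', True)

print(visible_in_listing({'id_perms': {'user_visible': False}}, is_admin=False))  # False
print(visible_in_listing({'id_perms': {'user_visible': False}}, is_admin=True))   # True
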
-
- def _route_table_create_default_children(self, parent_obj):
- pass
- #end _route_table_create_default_children
-
- def _route_table_delete_default_children(self, parent_dict):
- pass
- #end _route_table_delete_default_children
-
- def physical_interface_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_physical_interface_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'physical_interface':
- abort(404, 'No physical-interface object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'physical_interface', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('physical-interface')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'physical_interface', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'id_perms', u'display_name']
- references = []
- back_references = [u'physical_router_back_refs']
- children = [u'logical_interfaces']
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('physical-interface', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'physical_interface', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'physical_interface', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('physical-interface', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_physical_interface_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'physical-interface': rsp_body}
- #end physical_interface_http_get
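
The per-object GET above assembles the field list handed to dbe_read from the resource's properties, references, back-references and children, trimmed by the fields, exclude_back_refs and exclude_children query parameters. The same selection logic as a standalone function, with a plain dict standing in for bottle's request.query:

def fields_for(query, properties, references, back_references, children):
    if 'fields' in query:
        return query['fields'].split(',')
    obj_fields = list(properties) + list(references)
    if 'exclude_back_refs' not in query:
        obj_fields += back_references
    if 'exclude_children' not in query:
        obj_fields += children
    return obj_fields

print(fields_for({}, [u'id_perms', u'display_name'], [],
                 [u'physical_router_back_refs'], [u'logical_interfaces']))
print(fields_for({'exclude_children': ''}, [u'id_perms', u'display_name'], [],
                 [u'physical_router_back_refs'], [u'logical_interfaces']))
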
-
- def physical_interface_http_put(self, id):
- key = 'physical-interface'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_physical_interface_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'physical_interface':
- abort(404, 'No physical-interface object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'physical_interface', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'physical_interface', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = [u'logical_interface']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('physical-interface')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'physical_interface', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('physical-interface', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'physical_interface', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('physical-interface', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_physical_interface_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'physical-interface': rsp_body}
- #end physical_interface_http_put
-
- def physical_interface_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'physical_interface':
- abort(404, 'No physical-interface object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_physical_interface_delete', id)
- except Exception as e:
- pass
-
- # read the object from the db (tolerating read errors) so its details are available to the delete
- obj_ids = {'uuid': id}
- back_references = [u'physical_router_back_refs']
- children = [u'logical_interfaces']
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('physical-interface', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'physical_interface', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'physical_interface', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'physical_interface', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('physical-interface', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('physical-interface')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- logical_interfaces = read_result.get('logical_interfaces', None)
- if logical_interfaces:
- has_infos = read_result['logical_interfaces']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-logical-interface')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'physical_interface', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._physical_interface_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'physical_interface', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('physical-interface', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'physical_interface', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_physical_interface_delete', id, read_result)
- except Exception as e:
- pass
-
- #end physical_interface_http_delete
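
The delete above refuses to remove a physical-interface while non-default logical-interface children remain, aborting with 409 and listing their hrefs. The guard, restated with illustrative names and data:

def blocking_children(has_infos, default_name='default-logical-interface'):
    """Return hrefs of children that should make the delete fail with 409."""
    if not has_infos:
        return []
    if len(has_infos) == 1 and has_infos[0]['to'][-1] == default_name:
        return []
    return [info['href'] for info in has_infos]

only_default = [{'to': ['router1', 'ge-0/0/0', 'default-logical-interface'],
                 'href': 'http://example/logical-interface/1111'}]
print(blocking_children(only_default))    # [] -> delete may proceed
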
-
- def physical_interfaces_http_post(self):
- key = 'physical-interface'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_physical_interface_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'physical-interface', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'physical_interface', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'physical_interface', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = [u'logical_interface']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('physical-interface', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'physical_interface', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['physical_interface', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('physical-interface')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'physical_interface', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('physical-interface', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'physical_interface', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('physical-interface', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_physical_interface_create', obj_dict)
- except Exception as e:
- pass
-
- return {'physical-interface': rsp_body}
- #end physical_interfaces_http_post
-
- def physical_interfaces_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'physical_interfaces', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('physical-interface', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'physical_interfaces', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'physical-interfaces': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('physical-interface', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('physical-interface', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('physical-interface', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('physical-interface', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('physical-interface', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'physical-interface': obj_dict})
-
- return {'physical-interfaces': obj_dicts}
- #end physical_interfaces_http_get
-
- def _physical_interface_create_default_children(self, parent_obj):
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('logical-interface')
- if r_class and r_class.generate_default_instance:
- child_obj = LogicalInterface(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('logical-interface')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('logical-interface', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('logical-interface', obj_ids, child_dict)
- self._logical_interface_create_default_children(child_obj)
-
- pass
- #end _physical_interface_create_default_children
-
- def _physical_interface_delete_default_children(self, parent_dict):
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('logical-interface')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('logical_interfaces')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-logical-interface':
- default_child_id = has_info['href'].split('/')[-1]
- self.logical_interface_http_delete(default_child_id)
- break
-
- pass
- #end _physical_interface_delete_default_children
-
- def access_control_list_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_access_control_list_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'access_control_list':
- abort(404, 'No access-control-list object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'access_control_list', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('access-control-list')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'access_control_list', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'access_control_list_entries', u'id_perms', u'display_name']
- references = []
- back_references = [u'virtual_network_back_refs', u'security_group_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('access-control-list', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'access_control_list', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'access_control_list', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('access-control-list', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_access_control_list_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'access-control-list': rsp_body}
- #end access_control_list_http_get
-
- def access_control_list_http_put(self, id):
- key = 'access-control-list'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_access_control_list_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'access_control_list':
- abort(404, 'No access-control-list object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('access_control_list_entries')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_access_control_list_entries = AclEntriesType(**prop_dict)
- xx_access_control_list_entries.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_access_control_list_entries = AclEntriesType()
- try:
- xx_access_control_list_entries.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'access_control_list', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'access_control_list', 'http_put', msg)
- abort(code, msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('access-control-list')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'access_control_list', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('access-control-list', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'access_control_list', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('access-control-list', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_access_control_list_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'access-control-list': rsp_body}
- #end access_control_list_http_put
-
- def access_control_list_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'access_control_list':
- abort(404, 'No access-control-list object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_access_control_list_delete', id)
- except Exception as e:
- pass
-
- # read the object from the db (tolerating read errors) so its details are available to the delete
- obj_ids = {'uuid': id}
- back_references = [u'virtual_network_back_refs', u'security_group_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('access-control-list', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'access_control_list', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'access_control_list', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'access_control_list', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('access-control-list', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('access-control-list')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
-
- # Delete default children first
- self._access_control_list_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'access_control_list', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('access-control-list', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'access_control_list', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_access_control_list_delete', id, read_result)
- except Exception as e:
- pass
-
- #end access_control_list_http_delete
-
- def access_control_lists_http_post(self):
- key = 'access-control-list'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_access_control_list_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('access_control_list_entries')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_access_control_list_entries = AclEntriesType(**prop_dict)
- xx_access_control_list_entries.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_access_control_list_entries = AclEntriesType()
- try:
- xx_access_control_list_entries.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'access-control-list', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'access_control_list', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'access_control_list', 'http_post', err_msg)
- abort(400, err_msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('access-control-list', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'access_control_list', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['access_control_list', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('access-control-list')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'access_control_list', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('access-control-list', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'access_control_list', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('access-control-list', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_access_control_list_create', obj_dict)
- except Exception as e:
- pass
-
- return {'access-control-list': rsp_body}
- #end access_control_lists_http_post
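Editor's note: the deleted create handlers above all follow the same rollback convention: each step that mutates state pushes an undo callable (plus its arguments) onto cleanup_on_failure, and any later failure walks that list before aborting. The following is a minimal, self-contained sketch of that convention; every name in it (run_with_cleanup, fail_step, journal) is an illustrative stand-in, not part of the generated server.

# Illustrative sketch of the cleanup-on-failure convention used by the
# create/update/delete handlers above; all names are hypothetical stand-ins.

def run_with_cleanup(steps):
    """Each step is (action, undo_callable, undo_args); undo_callable may be None.
    On the first failing action, previously registered undo callables are
    invoked in registration order (as in the handlers above) and the error
    is re-raised."""
    cleanup_on_failure = []
    try:
        for action, undo, undo_args in steps:
            action()
            if undo is not None:
                cleanup_on_failure.append((undo, undo_args))
    except Exception:
        for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
            fail_cleanup_callable(*cleanup_args)
        raise


if __name__ == '__main__':
    def fail_step():
        raise RuntimeError('dbe_create failed')

    journal = []
    steps = [
        (lambda: journal.append('dbe_alloc'), journal.append, ['dbe_release']),
        (lambda: journal.append('post_hook'), journal.append, ['post_hook_fail']),
        (fail_step, None, None),
    ]
    try:
        run_with_cleanup(steps)
    except RuntimeError:
        pass
    print(journal)  # ['dbe_alloc', 'post_hook', 'dbe_release', 'post_hook_fail']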
-
- def access_control_lists_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'access_control_lists', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('access-control-list', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'access_control_lists', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'access-control-lists': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('access-control-list', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('access-control-list', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('access-control-list', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'access_control_list_entries', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('access-control-list', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('access-control-list', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'access-control-list': obj_dict})
-
- return {'access-control-lists': obj_dicts}
- #end access_control_lists_http_get
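Editor's note: the deleted collection GET above parses the 'filters' query parameter as comma-separated field==value pairs and rejects the whole string if any pair is malformed. A standalone sketch of that parsing follows; parse_filters is an illustrative helper name, not part of the API server.

# Hypothetical helper mirroring the 'filters' query-string handling above:
# "display_name==web-acl,fq_name==default-acl" -> field_names/field_values lists.

def parse_filters(filter_params):
    """Return {'field_names': [...], 'field_values': [...]} or None if empty.
    Raises ValueError when any pair is not of the form name==value, matching
    the handlers above, which abort(400) on any malformed pair."""
    if not filter_params:
        return None
    try:
        ff_key_vals = filter_params.split(',')
        ff_names = [ff.split('==')[0] for ff in ff_key_vals]
        ff_values = [ff.split('==')[1] for ff in ff_key_vals]
    except IndexError:
        raise ValueError('Invalid filter ' + filter_params)
    return {'field_names': ff_names, 'field_values': ff_values}


if __name__ == '__main__':
    print(parse_filters('display_name==web-acl,fq_name==default-acl'))
    # {'field_names': ['display_name', 'fq_name'],
    #  'field_values': ['web-acl', 'default-acl']}
    print(parse_filters(''))  # None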
-
- def _access_control_list_create_default_children(self, parent_obj):
- pass
- #end _access_control_list_create_default_children
-
- def _access_control_list_delete_default_children(self, parent_dict):
- pass
- #end _access_control_list_delete_default_children
-
- def analytics_node_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_analytics_node_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'analytics_node':
- abort(404, 'No analytics-node object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'analytics_node', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('analytics-node')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'analytics_node', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'analytics_node_ip_address', u'id_perms', u'display_name']
- references = []
- back_references = [u'global_system_config_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('analytics-node', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'analytics_node', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'analytics_node', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('analytics-node', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_analytics_node_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'analytics-node': rsp_body}
- #end analytics_node_http_get
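Editor's note: each deleted per-object GET implements a conditional read: the stored id_perms['last_modified'] timestamp doubles as the ETag, and a matching If-None-Match header short-circuits to 304 before the full DB read. Below is a minimal sketch of that flow; store, is_latest and conditional_get are hypothetical stand-ins for the DB layer and handler, not the generated code.

# Sketch of the If-None-Match / ETag handling used by the *_http_get methods.

store = {'a1b2': {'id_perms': {'last_modified': '2017-03-23T06:26:16'}}}

def is_latest(uuid_str, etag):
    """True when the client's ETag still matches the stored last_modified."""
    obj = store.get(uuid_str)
    return obj is not None and obj['id_perms']['last_modified'] == etag

def conditional_get(uuid_str, if_none_match=None):
    """Return (status, body); 304 with empty body when the cache is current."""
    if if_none_match and is_latest(uuid_str, if_none_match.replace('"', '')):
        return 304, None                      # client copy is current
    obj = store.get(uuid_str)
    if obj is None:
        return 404, 'No object found for id %s' % uuid_str
    etag = '"%s"' % obj['id_perms']['last_modified']
    return 200, {'uuid': uuid_str, 'ETag': etag}  # ETag really goes in a header


if __name__ == '__main__':
    status, body = conditional_get('a1b2')
    print(body['ETag'])                           # "2017-03-23T06:26:16"
    print(conditional_get('a1b2', body['ETag']))  # (304, None)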
-
- def analytics_node_http_put(self, id):
- key = 'analytics-node'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_analytics_node_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'analytics_node':
- abort(404, 'No analytics-node object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'analytics_node', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'analytics_node', 'http_put', msg)
- abort(code, msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('analytics-node')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'analytics_node', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('analytics-node', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'analytics_node', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('analytics-node', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_analytics_node_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'analytics-node': rsp_body}
- #end analytics_node_http_put
-
- def analytics_node_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'analytics_node':
- abort(404, 'No analytics-node object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_analytics_node_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'global_system_config_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('analytics-node', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'analytics_node', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'analytics_node', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'analytics_node', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('analytics-node', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('analytics-node')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
-
- # Delete default children first
- self._analytics_node_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'analytics_node', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('analytics-node', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'analytics_node', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_analytics_node_delete', id, read_result)
- except Exception as e:
- pass
-
- #end analytics_node_http_delete
-
- def analytics_nodes_http_post(self):
- key = 'analytics-node'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_analytics_node_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'analytics-node', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'analytics_node', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'analytics_node', 'http_post', err_msg)
- abort(400, err_msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('analytics-node', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'analytics_node', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['analytics_node', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('analytics-node')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'analytics_node', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('analytics-node', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'analytics_node', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('analytics-node', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_analytics_node_create', obj_dict)
- except Exception as e:
- pass
-
- return {'analytics-node': rsp_body}
- #end analytics_nodes_http_post
-
- def analytics_nodes_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'analytics_nodes', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('analytics-node', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'analytics_nodes', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'analytics-nodes': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('analytics-node', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('analytics-node', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('analytics-node', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'analytics_node_ip_address', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('analytics-node', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('analytics-node', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'analytics-node': obj_dict})
-
- return {'analytics-nodes': obj_dicts}
- #end analytics_nodes_http_get
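Editor's note: the deleted collection GETs accept parent_id and back_ref_id as comma-separated lists and normalise each entry through uuid.UUID, so dashed and undashed forms resolve to the same canonical string. A standalone sketch of that normalisation; parse_id_list is an illustrative name.

# Sketch of the comma-separated UUID handling in the *_http_get collection methods.

import uuid

def parse_id_list(raw):
    """Split a comma-separated list and canonicalise each entry via uuid.UUID,
    as the handlers above do for parent_id and back_ref_id; raises ValueError
    on anything that is not a UUID."""
    if not raw:
        return None
    return [str(uuid.UUID(u)) for u in raw.split(',')]


if __name__ == '__main__':
    dashed = 'c29a5253-31b4-5ead-15ec-376d03f76836'
    undashed = dashed.replace('-', '')
    print(parse_id_list('%s,%s' % (dashed, undashed)))
    # both entries come back in the canonical dashed form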
-
- def _analytics_node_create_default_children(self, parent_obj):
- pass
- #end _analytics_node_create_default_children
-
- def _analytics_node_delete_default_children(self, parent_dict):
- pass
- #end _analytics_node_delete_default_children
-
- def virtual_DNS_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_DNS_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'virtual_DNS':
- abort(404, 'No virtual-DNS object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'virtual_DNS', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-DNS')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'virtual_DNS', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'virtual_DNS_data', u'id_perms', u'display_name']
- references = []
- back_references = [u'domain_back_refs', u'network_ipam_back_refs']
- children = [u'virtual_DNS_records']
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('virtual-DNS', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'virtual_DNS', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'virtual_DNS', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('virtual-DNS', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_DNS_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'virtual-DNS': rsp_body}
- #end virtual_DNS_http_get
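Editor's note: the deleted per-object GET handlers build the field list handed to the DB layer from a fixed per-type schema (properties, references, back-references, children) plus the request's 'fields', 'exclude_back_refs' and 'exclude_children' parameters. A small sketch of that selection logic with illustrative inputs; select_fields is not part of the generated server.

# Sketch of the obj_fields selection performed in the *_http_get methods.
# The schema lists below are illustrative; the real ones are generated per type.

def select_fields(query, properties, references, back_references, children):
    """Mimic the handlers above: an explicit 'fields' list wins outright;
    otherwise start from props+refs and add back-refs/children unless the
    corresponding exclude_* flag is present in the query dict."""
    if 'fields' in query:
        return query['fields'].split(',')
    obj_fields = properties + references
    if 'exclude_back_refs' not in query:
        obj_fields = obj_fields + back_references
    if 'exclude_children' not in query:
        obj_fields = obj_fields + children
    return obj_fields


if __name__ == '__main__':
    props = [u'virtual_DNS_data', u'id_perms', u'display_name']
    refs, back_refs, kids = [], [u'domain_back_refs'], [u'virtual_DNS_records']
    print(select_fields({}, props, refs, back_refs, kids))
    print(select_fields({'exclude_children': ''}, props, refs, back_refs, kids))
    print(select_fields({'fields': 'id_perms,display_name'}, props, refs, back_refs, kids))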
-
- def virtual_DNS_http_put(self, id):
- key = 'virtual-DNS'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_DNS_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'virtual_DNS':
- abort(404, 'No virtual-DNS object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('virtual_DNS_data')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_DNS_data = VirtualDnsType(**prop_dict)
- xx_virtual_DNS_data.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_DNS_data = VirtualDnsType()
- try:
- xx_virtual_DNS_data.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'virtual_DNS', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'virtual_DNS', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = [u'virtual_DNS_record']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('virtual-DNS')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'virtual_DNS', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('virtual-DNS', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'virtual_DNS', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('virtual-DNS', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_DNS_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'virtual-DNS': rsp_body}
- #end virtual_DNS_http_put
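Editor's note: virtual_DNS_http_put, like the other deleted PUT/POST handlers, validates each complex property by instantiating the generated type from the request dict, exporting it to XML, re-parsing, and rebuilding it, so schema violations surface as a 400 before any state changes. The toy type below imitates that export/build round trip purely to show the shape of the check; it is not the generated VirtualDnsType or IdPermsType.

# Toy imitation of the export -> etree.fromstring -> build validation round trip.
# ToyIdPerms is a hypothetical stand-in for generated types such as IdPermsType.

import io
from xml.etree import ElementTree as etree

class ToyIdPerms(object):
    def __init__(self, enable=None, description=None):
        self.enable = enable
        self.description = description

    def export(self, buf):
        buf.write(u'<id-perms><enable>%s</enable><description>%s</description></id-perms>'
                  % (self.enable, self.description))

    def build(self, node):
        enable_text = node.find('enable').text
        if enable_text not in ('True', 'False'):
            raise ValueError('enable must be boolean, got %r' % enable_text)
        self.enable = enable_text == 'True'
        self.description = node.find('description').text

def validate_id_perms(prop_dict):
    """Mirror the handlers above: serialize, re-parse, rebuild; raise on bad input."""
    buf = io.StringIO()
    ToyIdPerms(**prop_dict).export(buf)
    node = etree.fromstring(buf.getvalue())
    checked = ToyIdPerms()
    checked.build(node)          # the generated code abort(400)s if this raises
    return checked


if __name__ == '__main__':
    print(validate_id_perms({'enable': True, 'description': 'ok'}).enable)  # True
    try:
        validate_id_perms({'enable': 'maybe', 'description': 'bad'})
    except ValueError as e:
        print(e)                 # enable must be boolean, got 'maybe'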
-
- def virtual_DNS_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'virtual_DNS':
- abort(404, 'No virtual-DNS object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_DNS_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'domain_back_refs', u'network_ipam_back_refs']
- children = [u'virtual_DNS_records']
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('virtual-DNS', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'virtual_DNS', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'virtual_DNS', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'virtual_DNS', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('virtual-DNS', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-DNS')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- virtual_DNS_records = read_result.get('virtual_DNS_records', None)
- if virtual_DNS_records:
- has_infos = read_result['virtual_DNS_records']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-virtual-DNS-record')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'virtual_DNS', 'http_delete', err_msg)
- abort(409, err_msg)
-
- network_ipam_back_refs = read_result.get('network_ipam_back_refs', None)
- if network_ipam_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['network_ipam_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_DNS', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._virtual_DNS_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'virtual_DNS', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('virtual-DNS', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'virtual_DNS', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_DNS_delete', id, read_result)
- except Exception as e:
- pass
-
- #end virtual_DNS_http_delete
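Editor's note: virtual_DNS_http_delete refuses to delete while non-default children or back-references remain, returning 409 with the offending hrefs. Below is a simplified sketch of that guard using sample read_result data in the same shape as the dbe_read output consumed above; check_deletable is an illustrative helper, not the generated code.

# Sketch of the 'children / back-refs still exist' guard applied before delete.
# read_result below is illustrative sample data, not a real DB read.

def check_deletable(read_result, children_field, default_child_name, back_ref_fields):
    """Return (True, '') when deletion may proceed, else (False, reason),
    mirroring the 409 responses issued by the delete handlers above."""
    has_infos = read_result.get(children_field) or []
    non_default = [c for c in has_infos if c['to'][-1] != default_child_name]
    if non_default:
        return False, 'Children %s still exist' % ', '.join(c['href'] for c in non_default)
    for field in back_ref_fields:
        back_refs = read_result.get(field) or []
        if back_refs:
            return False, ('Back-References from %s still exist'
                           % ', '.join(b['href'] for b in back_refs))
    return True, ''


if __name__ == '__main__':
    read_result = {
        'virtual_DNS_records': [
            {'to': ['default-domain', 'vdns1', 'default-virtual-DNS-record'],
             'href': 'http://api:8082/virtual-DNS-record/1111'},
            {'to': ['default-domain', 'vdns1', 'rec-a'],
             'href': 'http://api:8082/virtual-DNS-record/2222'},
        ],
        'network_ipam_back_refs': [],
    }
    print(check_deletable(read_result, 'virtual_DNS_records',
                          'default-virtual-DNS-record', ['network_ipam_back_refs']))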
-
- def virtual_DNSs_http_post(self):
- key = 'virtual-DNS'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_DNS_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('virtual_DNS_data')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_DNS_data = VirtualDnsType(**prop_dict)
- xx_virtual_DNS_data.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_DNS_data = VirtualDnsType()
- try:
- xx_virtual_DNS_data.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'virtual-DNS', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'virtual_DNS', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_DNS', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = [u'virtual_DNS_record']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('virtual-DNS', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'virtual_DNS', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['virtual_DNS', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-DNS')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_DNS', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('virtual-DNS', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_DNS', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('virtual-DNS', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_DNS_create', obj_dict)
- except Exception as e:
- pass
-
- return {'virtual-DNS': rsp_body}
- #end virtual_DNSs_http_post
-
- def virtual_DNSs_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'virtual_DNSs', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('virtual-DNS', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'virtual_DNSs', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'virtual-DNSs': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('virtual-DNS', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('virtual-DNS', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('virtual-DNS', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'virtual_DNS_data', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('virtual-DNS', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('virtual-DNS', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'virtual-DNS': obj_dict})
-
- return {'virtual-DNSs': obj_dicts}
- #end virtual_DNSs_http_get
-
- def _virtual_DNS_create_default_children(self, parent_obj):
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('virtual-DNS-record')
- if r_class and r_class.generate_default_instance:
- child_obj = VirtualDnsRecord(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('virtual-DNS-record')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('virtual-DNS-record', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('virtual-DNS-record', obj_ids, child_dict)
- self._virtual_DNS_record_create_default_children(child_obj)
-
- pass
- #end _virtual_DNS_create_default_children
-
- def _virtual_DNS_delete_default_children(self, parent_dict):
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('virtual-DNS-record')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('virtual_DNS_records')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-virtual-DNS-record':
- default_child_id = has_info['href'].split('/')[-1]
- self.virtual_DNS_record_http_delete(default_child_id)
- break
-
- pass
- #end _virtual_DNS_delete_default_children
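Editor's note: _virtual_DNS_delete_default_children locates the auto-created 'default-virtual-DNS-record' child by its fq_name tail and derives its uuid from the stored href before deleting it. A tiny sketch of that lookup; find_default_child is an illustrative name only.

# Sketch of how the default child is located from the parent's child list.

def find_default_child(has_infos, default_name):
    """Return the uuid (last href segment) of the child named default_name, or None."""
    for has_info in has_infos or []:
        if has_info['to'][-1] == default_name:
            return has_info['href'].split('/')[-1]
    return None


if __name__ == '__main__':
    children = [{'to': ['default-domain', 'vdns1', 'default-virtual-DNS-record'],
                 'href': 'http://api:8082/virtual-DNS-record/3333'}]
    print(find_default_child(children, 'default-virtual-DNS-record'))  # 3333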
-
- def customer_attachment_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_customer_attachment_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'customer_attachment':
- abort(404, 'No customer-attachment object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'customer_attachment', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('customer-attachment')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'customer_attachment', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'attachment_address', u'id_perms', u'display_name']
- references = ['virtual_machine_interface_refs', u'floating_ip_refs']
- back_references = []
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('customer-attachment', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'customer_attachment', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'customer_attachment', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('customer-attachment', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_customer_attachment_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'customer-attachment': rsp_body}
- #end customer_attachment_http_get
-
- def customer_attachment_http_put(self, id):
- key = 'customer-attachment'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_customer_attachment_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'customer_attachment':
- abort(404, 'No customer-attachment object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('attachment_address')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_attachment_address = AttachmentAddressType(**prop_dict)
- xx_attachment_address.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_attachment_address = AttachmentAddressType()
- try:
- xx_attachment_address.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'customer_attachment', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'customer_attachment', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = ['virtual_machine_interface', u'floating_ip', 'routing_instance', 'provider_attachment']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('customer-attachment')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'customer_attachment', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('customer-attachment', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'customer_attachment', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('customer-attachment', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_customer_attachment_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'customer-attachment': rsp_body}
- #end customer_attachment_http_put
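Editor's note: the customer-attachment PUT/POST handlers also walk every declared reference type, resolve each ref's fq_name to a uuid, and require link permission on the target before the update proceeds. A condensed sketch of that loop follows, with stub resolver and permission checker (resolve_fq_name, may_link, FQ_NAME_TABLE) standing in for the DB and permission layers.

# Sketch of the per-reference permission pass in customer_attachment_http_put/post.
# resolve_fq_name and may_link are hypothetical stand-ins.

class NoIdError(Exception):
    pass

FQ_NAME_TABLE = {('default-domain', 'proj1', 'vmi1'): 'uuid-vmi-1'}

def resolve_fq_name(obj_type, fq_name):
    try:
        return FQ_NAME_TABLE[tuple(fq_name)]
    except KeyError:
        raise NoIdError('%s %s not found' % (obj_type, ':'.join(fq_name)))

def may_link(ref_uuid):
    return ref_uuid.startswith('uuid-')   # placeholder policy

def check_ref_perms(obj_dict, objtype_list):
    """Return a list of (http_code, message) problems; empty means all refs ok."""
    problems = []
    for obj_type in objtype_list:
        for ref in obj_dict.get('%s_refs' % obj_type, []) or []:
            try:
                ref_uuid = resolve_fq_name(obj_type, ref['to'])
            except NoIdError as e:
                problems.append((404, str(e)))
                continue
            if not may_link(ref_uuid):
                problems.append((403, 'link to %s not permitted' % ref_uuid))
    return problems


if __name__ == '__main__':
    obj_dict = {'virtual_machine_interface_refs':
                [{'to': ['default-domain', 'proj1', 'vmi1']},
                 {'to': ['default-domain', 'proj1', 'missing']}]}
    print(check_ref_perms(obj_dict, ['virtual_machine_interface']))
    # [(404, 'virtual_machine_interface default-domain:proj1:missing not found')]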
-
- def customer_attachment_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'customer_attachment':
- abort(404, 'No customer-attachment object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_customer_attachment_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = []
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('customer-attachment', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'customer_attachment', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'customer_attachment', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'customer_attachment', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('customer-attachment', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('customer-attachment')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
-
- # Delete default children first
- self._customer_attachment_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'customer_attachment', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('customer-attachment', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'customer_attachment', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_customer_attachment_delete', id, read_result)
- except Exception as e:
- pass
-
- #end customer_attachment_http_delete
-
- def customer_attachments_http_post(self):
- key = 'customer-attachment'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_customer_attachment_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('attachment_address')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_attachment_address = AttachmentAddressType(**prop_dict)
- xx_attachment_address.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_attachment_address = AttachmentAddressType()
- try:
- xx_attachment_address.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'customer-attachment', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'customer_attachment', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # Validate perms
- objtype_list = ['virtual_machine_interface', u'floating_ip', 'routing_instance', 'provider_attachment']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('customer-attachment', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'customer_attachment', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['customer_attachment', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('customer-attachment')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'customer_attachment', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('customer-attachment', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'customer_attachment', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('customer-attachment', obj_ids['uuid'])
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_customer_attachment_create', obj_dict)
- except Exception as e:
- pass
-
- return {'customer-attachment': rsp_body}
- #end customer_attachments_http_post
-
- def customer_attachments_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'customer_attachments', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('customer-attachment', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'customer_attachments', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'customer-attachments': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('customer-attachment', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('customer-attachment', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('customer-attachment', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'attachment_address', u'id_perms', u'display_name'] + ['virtual_machine_interface_refs', u'floating_ip_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('customer-attachment', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('customer-attachment', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'customer-attachment': obj_dict})
-
- return {'customer-attachments': obj_dicts}
- #end customer_attachments_http_get
-
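Each collection GET handler in this file (customer-attachments above, and the service-appliance-set, config-node and qos-queue collections below) turns the optional `filters` query parameter of the form `name==value,name2==value2` into parallel name/value lists before calling `dbe_list`. A minimal standalone sketch of that parsing, using a hypothetical filter string:

def parse_filters(filter_params):
    # Mirrors the handler logic: absent/empty filters become None, otherwise
    # 'a==1,b==2' -> {'field_names': ['a', 'b'], 'field_values': ['1', '2']}.
    # A malformed entry (no '==') raises, which the handlers map to a 400.
    if not filter_params:
        return None
    ff_key_vals = filter_params.split(',')
    ff_names = [ff.split('==')[0] for ff in ff_key_vals]
    ff_values = [ff.split('==')[1] for ff in ff_key_vals]
    return {'field_names': ff_names, 'field_values': ff_values}

print(parse_filters('display_name==web-queue,uuid==1234'))
# {'field_names': ['display_name', 'uuid'], 'field_values': ['web-queue', '1234']}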
- def _customer_attachment_create_default_children(self, parent_obj):
- pass
- #end _customer_attachment_create_default_children
-
- def _customer_attachment_delete_default_children(self, parent_dict):
- pass
- #end _customer_attachment_delete_default_children
-
- def service_appliance_set_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_service_appliance_set_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'service_appliance_set':
- abort(404, 'No service-appliance-set object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'service_appliance_set', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('service-appliance-set')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'service_appliance_set', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'service_appliance_set_properties', u'service_appliance_driver', u'service_appliance_ha_mode', u'id_perms', u'display_name']
- references = []
- back_references = [u'global_system_config_back_refs', u'loadbalancer_pool_back_refs']
- children = [u'service_appliances']
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('service-appliance-set', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'service_appliance_set', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'service_appliance_set', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('service-appliance-set', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_service_appliance_set_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'service-appliance-set': rsp_body}
- #end service_appliance_set_http_get
-
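Every per-object GET in this file follows the same conditional-read pattern seen above: the ETag is the quoted `id_perms['last_modified']` timestamp, and a matching `If-None-Match` header (quotes stripped) short-circuits the read with a 304. In the real handlers the comparison is delegated to `db_conn.dbe_is_latest`; this is a simplified standalone sketch with a stand-in dict in place of the database row:

def conditional_get(stored_id_perms, if_none_match):
    # The ETag value is the object's last_modified timestamp, returned quoted.
    etag_value = stored_id_perms['last_modified']
    if if_none_match and if_none_match.replace('"', '') == etag_value:
        return 304, None                      # Not Modified, no body
    return 200, {'id_perms': stored_id_perms, 'ETag': '"%s"' % etag_value}

row = {'last_modified': '2017-03-23T06:26:16'}
print(conditional_get(row, '"2017-03-23T06:26:16"')[0])   # 304
print(conditional_get(row, None)[0])                      # 200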
- def service_appliance_set_http_put(self, id):
- key = 'service-appliance-set'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_service_appliance_set_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'service_appliance_set':
- abort(404, 'No service-appliance-set object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('service_appliance_set_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_service_appliance_set_properties = KeyValuePairs(**prop_dict)
- xx_service_appliance_set_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_service_appliance_set_properties = KeyValuePairs()
- try:
- xx_service_appliance_set_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'service_appliance_set', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'service_appliance_set', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = [u'service_appliance']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('service-appliance-set')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'service_appliance_set', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('service-appliance-set', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'service_appliance_set', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('service-appliance-set', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_service_appliance_set_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'service-appliance-set': rsp_body}
- #end service_appliance_set_http_put
-
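The PUT, POST and DELETE handlers all share the `cleanup_on_failure` convention used above: once state modification starts, each successful step registers an undo callable with its arguments, and the first failing step replays those undos before calling `abort`. A standalone sketch of the pattern, with hypothetical steps standing in for the resource-class hooks and `dbe_*` calls:

def run_with_cleanup(steps):
    # steps: list of (do_callable, undo_entry) where do_callable returns
    # (ok, result) and undo_entry is an optional (callable, args) tuple.
    cleanup_on_failure = []
    for do_step, undo_entry in steps:
        ok, result = do_step()
        if not ok:
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            return False, result
        if undo_entry:
            cleanup_on_failure.append(undo_entry)
    return True, None

# Hypothetical run: the second step fails, so the first step's undo fires.
ok, err = run_with_cleanup([
    (lambda: (True, None), (print, ['undo: release allocated ids'])),
    (lambda: (False, 'dbe_create failed'), None),
])
print(ok, err)   # False dbe_create failed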
- def service_appliance_set_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'service_appliance_set':
- abort(404, 'No service-appliance-set object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_service_appliance_set_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'global_system_config_back_refs', u'loadbalancer_pool_back_refs']
- children = [u'service_appliances']
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('service-appliance-set', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'service_appliance_set', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'service_appliance_set', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'service_appliance_set', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('service-appliance-set', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('service-appliance-set')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- service_appliances = read_result.get('service_appliances', None)
- if service_appliances:
- has_infos = read_result['service_appliances']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-service-appliance')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'service_appliance_set', 'http_delete', err_msg)
- abort(409, err_msg)
-
- loadbalancer_pool_back_refs = read_result.get('loadbalancer_pool_back_refs', None)
- if loadbalancer_pool_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['loadbalancer_pool_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'service_appliance_set', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._service_appliance_set_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'service_appliance_set', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('service-appliance-set', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'service_appliance_set', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_service_appliance_set_delete', id, read_result)
- except Exception as e:
- pass
-
- #end service_appliance_set_http_delete
-
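The DELETE handlers refuse to remove an object that is still referenced: remaining back-references (or non-default children) produce a 409 whose message lists the offending hrefs, exactly as in `service_appliance_set_http_delete` above. A standalone sketch of the back-reference check, with a hypothetical read result:

def check_no_back_refs(read_result, back_ref_field):
    back_refs = read_result.get(back_ref_field) or []
    if back_refs:
        urls = [info['href'] for info in back_refs]
        return False, 'Back-References from ' + ', '.join(urls) + ' still exist'
    return True, None

print(check_no_back_refs(
    {'loadbalancer_pool_back_refs': [
        {'href': 'http://api-server:8082/loadbalancer-pool/1111'}]},
    'loadbalancer_pool_back_refs'))
# (False, 'Back-References from http://api-server:8082/loadbalancer-pool/1111 still exist')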
- def service_appliance_sets_http_post(self):
- key = 'service-appliance-set'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_service_appliance_set_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('service_appliance_set_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_service_appliance_set_properties = KeyValuePairs(**prop_dict)
- xx_service_appliance_set_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_service_appliance_set_properties = KeyValuePairs()
- try:
- xx_service_appliance_set_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'service-appliance-set', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'service_appliance_set', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'service_appliance_set', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = [u'service_appliance']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('service-appliance-set', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'service_appliance_set', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['service_appliance_set', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('service-appliance-set')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'service_appliance_set', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('service-appliance-set', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'service_appliance_set', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('service-appliance-set', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_service_appliance_set_create', obj_dict)
- except Exception as e:
- pass
-
- return {'service-appliance-set': rsp_body}
- #end service_appliance_sets_http_post
-
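The collection POST handlers normalise the parent reference before allocating ids: a `parent_type` of 'config-root' is simply dropped, while any other parent is resolved from the fq_name prefix (everything but the last name) and checked for write permission. A standalone sketch of that resolution, using a hypothetical config-node payload:

def resolve_parent(obj_dict):
    # 'config-root' parents are implicit and removed from the payload.
    if obj_dict.get('parent_type') == 'config-root':
        del obj_dict['parent_type']
    if 'parent_type' not in obj_dict:
        return None, None
    return obj_dict['parent_type'], obj_dict['fq_name'][:-1]

obj = {'parent_type': 'global-system-config',
       'fq_name': ['default-global-system-config', 'node-1']}
print(resolve_parent(obj))
# ('global-system-config', ['default-global-system-config'])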
- def service_appliance_sets_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'service_appliance_sets', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('service-appliance-set', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'service_appliance_sets', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'service-appliance-sets': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('service-appliance-set', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('service-appliance-set', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('service-appliance-set', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'service_appliance_set_properties', u'service_appliance_driver', u'service_appliance_ha_mode', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('service-appliance-set', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('service-appliance-set', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'service-appliance-set': obj_dict})
-
- return {'service-appliance-sets': obj_dicts}
- #end service_appliance_sets_http_get
-
- def _service_appliance_set_create_default_children(self, parent_obj):
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('service-appliance')
- if r_class and r_class.generate_default_instance:
- child_obj = ServiceAppliance(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('service-appliance')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('service-appliance', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('service-appliance', obj_ids, child_dict)
- self._service_appliance_create_default_children(child_obj)
-
- pass
- #end _service_appliance_set_create_default_children
-
- def _service_appliance_set_delete_default_children(self, parent_dict):
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('service-appliance')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('service_appliances')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-service-appliance':
- default_child_id = has_info['href'].split('/')[-1]
- self.service_appliance_http_delete(default_child_id)
- break
-
- pass
- #end _service_appliance_set_delete_default_children
-
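`_service_appliance_set_create_default_children` and its delete counterpart above only act when the child resource class sets `generate_default_instance`; the default child is recognised by an fq_name ending in 'default-service-appliance', and its uuid is taken from the last path segment of its href. A standalone sketch of that lookup, with hypothetical children and hrefs:

def find_default_child(has_infos, default_name):
    for has_info in has_infos or []:
        if has_info['to'][-1] == default_name:
            return has_info['href'].split('/')[-1]
    return None

children = [
    {'to': ['default-gsc', 'sa-set-1', 'appliance-a'],
     'href': 'http://api-server:8082/service-appliance/aaaa'},
    {'to': ['default-gsc', 'sa-set-1', 'default-service-appliance'],
     'href': 'http://api-server:8082/service-appliance/bbbb'},
]
print(find_default_child(children, 'default-service-appliance'))   # bbbb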
- def config_node_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_config_node_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'config_node':
- abort(404, 'No config-node object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'config_node', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('config-node')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'config_node', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'config_node_ip_address', u'id_perms', u'display_name']
- references = []
- back_references = [u'global_system_config_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('config-node', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'config_node', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'config_node', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('config-node', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_config_node_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'config-node': rsp_body}
- #end config_node_http_get
-
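`config_node_http_get` above builds its field list the same way as every other per-object GET: an explicit `fields` query parameter wins outright; otherwise the default is properties plus references, with back-references and children appended unless `exclude_back_refs` or `exclude_children` is passed. A standalone sketch of that selection:

def select_fields(query, properties, references, back_references, children):
    if 'fields' in query:
        return query['fields'].split(',')
    obj_fields = properties + references
    if 'exclude_back_refs' not in query:
        obj_fields = obj_fields + back_references
    if 'exclude_children' not in query:
        obj_fields = obj_fields + children
    return obj_fields

props = ['config_node_ip_address', 'id_perms', 'display_name']
print(select_fields({}, props, [], ['global_system_config_back_refs'], []))
print(select_fields({'exclude_back_refs': ''}, props, [],
                    ['global_system_config_back_refs'], []))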
- def config_node_http_put(self, id):
- key = 'config-node'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_config_node_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'config_node':
- abort(404, 'No config-node object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'config_node', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'config_node', 'http_put', msg)
- abort(code, msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('config-node')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'config_node', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('config-node', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'config_node', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('config-node', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_config_node_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'config-node': rsp_body}
- #end config_node_http_put
-
- def config_node_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'config_node':
- abort(404, 'No config-node object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_config_node_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'global_system_config_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('config-node', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'config_node', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'config_node', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'config_node', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('config-node', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('config-node')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
-
- # Delete default children first
- self._config_node_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'config_node', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('config-node', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'config_node', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_config_node_delete', id, read_result)
- except Exception as e:
- pass
-
- #end config_node_http_delete
-
- def config_nodes_http_post(self):
- key = 'config-node'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_config_node_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'config-node', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'config_node', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'config_node', 'http_post', err_msg)
- abort(400, err_msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('config-node', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'config_node', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['config_node', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('config-node')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'config_node', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('config-node', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'config_node', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('config-node', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_config_node_create', obj_dict)
- except Exception as e:
- pass
-
- return {'config-node': rsp_body}
- #end config_nodes_http_post
-
- def config_nodes_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'config_nodes', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('config-node', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'config_nodes', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'config-nodes': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('config-node', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('config-node', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('config-node', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'config_node_ip_address', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('config-node', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('config-node', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'config-node': obj_dict})
-
- return {'config-nodes': obj_dicts}
- #end config_nodes_http_get
-
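The collection GET responses above also filter on visibility: a non-admin caller only sees entries whose `id_perms.user_visible` flag is not False, while `is_admin_request()` callers see everything. A standalone sketch of that filter over hypothetical rows:

def visible_uuids(read_results, is_admin):
    out = []
    for obj_result in read_results:
        if is_admin or obj_result['id_perms'].get('user_visible', True):
            out.append(obj_result['uuid'])
    return out

rows = [{'uuid': 'u-1', 'id_perms': {'user_visible': True}},
        {'uuid': 'u-2', 'id_perms': {'user_visible': False}}]
print(visible_uuids(rows, is_admin=False))   # ['u-1']
print(visible_uuids(rows, is_admin=True))    # ['u-1', 'u-2']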
- def _config_node_create_default_children(self, parent_obj):
- pass
- #end _config_node_create_default_children
-
- def _config_node_delete_default_children(self, parent_dict):
- pass
- #end _config_node_delete_default_children
-
- def qos_queue_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_qos_queue_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'qos_queue':
- abort(404, 'No qos-queue object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'qos_queue', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('qos-queue')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'qos_queue', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'min_bandwidth', u'max_bandwidth', u'id_perms', u'display_name']
- references = []
- back_references = [u'project_back_refs', u'qos_forwarding_class_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('qos-queue', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'qos_queue', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'qos_queue', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('qos-queue', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_qos_queue_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'qos-queue': rsp_body}
- #end qos_queue_http_get
-
- def qos_queue_http_put(self, id):
- key = 'qos-queue'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_qos_queue_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'qos_queue':
- abort(404, 'No qos-queue object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'qos_queue', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'qos_queue', 'http_put', msg)
- abort(code, msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('qos-queue')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'qos_queue', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('qos-queue', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'qos_queue', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('qos-queue', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_qos_queue_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'qos-queue': rsp_body}
- #end qos_queue_http_put
-
- def qos_queue_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'qos_queue':
- abort(404, 'No qos-queue object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_qos_queue_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'project_back_refs', u'qos_forwarding_class_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('qos-queue', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'qos_queue', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'qos_queue', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'qos_queue', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('qos-queue', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('qos-queue')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- qos_forwarding_class_back_refs = read_result.get('qos_forwarding_class_back_refs', None)
- if qos_forwarding_class_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['qos_forwarding_class_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'qos_queue', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._qos_queue_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'qos_queue', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('qos-queue', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'qos_queue', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_qos_queue_delete', id, read_result)
- except Exception as e:
- pass
-
- #end qos_queue_http_delete
-
- def qos_queues_http_post(self):
- key = 'qos-queue'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_qos_queue_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'qos-queue', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'qos_queue', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'qos_queue', 'http_post', err_msg)
- abort(400, err_msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('qos-queue', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'qos_queue', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['qos_queue', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('qos-queue')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'qos_queue', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('qos-queue', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'qos_queue', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('qos-queue', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_qos_queue_create', obj_dict)
- except Exception as e:
- pass
-
- return {'qos-queue': rsp_body}
- #end qos_queues_http_post
-
- def qos_queues_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'qos_queues', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('qos-queue', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'qos_queues', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'qos-queues': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('qos-queue', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('qos-queue', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('qos-queue', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'min_bandwidth', u'max_bandwidth', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('qos-queue', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('qos-queue', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'qos-queue': obj_dict})
-
- return {'qos-queues': obj_dicts}
- #end qos_queues_http_get
-
- def _qos_queue_create_default_children(self, parent_obj):
- pass
- #end _qos_queue_create_default_children
-
- def _qos_queue_delete_default_children(self, parent_dict):
- pass
- #end _qos_queue_delete_default_children
-
- def virtual_machine_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_machine_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'virtual_machine':
- abort(404, 'No virtual-machine object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'virtual_machine', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-machine')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'virtual_machine', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'id_perms', u'display_name']
- references = [u'service_instance_refs']
- back_references = ['virtual_machine_interface_back_refs', 'virtual_router_back_refs']
- children = ['virtual_machine_interfaces']
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('virtual-machine', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'virtual_machine', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'virtual_machine', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('virtual-machine', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_machine_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'virtual-machine': rsp_body}
- #end virtual_machine_http_get
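The read handler above honours If-None-Match by asking the DB layer whether the stored id_perms.last_modified value still matches and, if so, answers 304 Not Modified with no body; a full read sets a quoted ETag from the same field. A minimal sketch of that exchange, assuming a plain dict keyed by uuid stands in for dbe_is_latest/dbe_read:

    # Sketch of the conditional-GET flow used by the generated read handlers.
    # 'store' is a hypothetical in-memory stand-in for the config DB.
    def conditional_read(store, uuid, if_none_match=None):
        obj = store.get(uuid)
        if obj is None:
            return 404, 'ID %s does not exist' % uuid
        last_modified = obj['id_perms']['last_modified']
        if if_none_match and if_none_match.replace('"', '') == last_modified:
            return 304, None                      # client copy is still current
        return 200, {'etag': '"%s"' % last_modified, 'body': obj}

    store = {'u1': {'id_perms': {'last_modified': '2017-03-23T06:26:16'}}}
    assert conditional_read(store, 'u1', '"2017-03-23T06:26:16"')[0] == 304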
-
- def virtual_machine_http_put(self, id):
- key = 'virtual-machine'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_machine_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'virtual_machine':
- abort(404, 'No virtual-machine object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'virtual_machine', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'virtual_machine', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = ['virtual_machine_interface', u'service_instance']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('virtual-machine')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'virtual_machine', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('virtual-machine', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'virtual_machine', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('virtual-machine', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_machine_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'virtual-machine': rsp_body}
- #end virtual_machine_http_put
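Every mutating handler builds a cleanup_on_failure list as it goes: after each step that succeeds and exposes a *_fail hook it records (callable, args), and if a later step fails it invokes everything recorded so far before aborting. A compact illustration of that compensation pattern, detached from the VNC resource classes (the generated handlers run the callbacks in insertion order, as here):

    # Sketch of the cleanup-on-failure pattern used by the put/post/delete handlers.
    def run_with_compensation(steps):
        """steps: iterable of (do, undo_or_None); undo is recorded only after do succeeds."""
        cleanup_on_failure = []
        try:
            for do, undo in steps:
                do()
                if undo:
                    cleanup_on_failure.append(undo)
        except Exception:
            for undo in cleanup_on_failure:       # best-effort rollback of earlier steps
                undo()
            raise

    done = []
    try:
        run_with_compensation([
            (lambda: done.append('alloc'), lambda: done.append('release')),
            (lambda: 1 / 0,               None),
        ])
    except ZeroDivisionError:
        pass
    assert done == ['alloc', 'release']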
-
- def virtual_machine_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'virtual_machine':
- abort(404, 'No virtual-machine object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_machine_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = ['virtual_machine_interface_back_refs', 'virtual_router_back_refs']
- children = ['virtual_machine_interfaces']
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('virtual-machine', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'virtual_machine', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'virtual_machine', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'virtual_machine', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('virtual-machine', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-machine')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- virtual_machine_interfaces = read_result.get('virtual_machine_interfaces', None)
- if virtual_machine_interfaces:
- has_infos = read_result['virtual_machine_interfaces']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-virtual-machine-interface')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'virtual_machine', 'http_delete', err_msg)
- abort(409, err_msg)
-
- virtual_machine_interface_back_refs = read_result.get('virtual_machine_interface_back_refs', None)
- if virtual_machine_interface_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_machine_interface_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_machine', 'http_delete', err_msg)
- abort(409, err_msg)
-
- virtual_router_back_refs = read_result.get('virtual_router_back_refs', None)
- if virtual_router_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_router_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_machine', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._virtual_machine_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'virtual_machine', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('virtual-machine', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'virtual_machine', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_machine_delete', id, read_result)
- except Exception as e:
- pass
-
- #end virtual_machine_http_delete
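Before the delete proceeds, the handler re-reads the object's children and back-reference fields and refuses with 409 Conflict while anything other than the auto-created default child is still attached. A small sketch of that guard over an already-read result dict; field names and the default-child name are passed in rather than generated per type:

    # Sketch of the pre-delete guard: reject deletion while children or
    # back-references remain (a lone 'default-*' child is tolerated).
    def check_deletable(read_result, children_fields, back_ref_fields, default_name=None):
        for field in children_fields:
            infos = read_result.get(field) or []
            real = [i for i in infos if i['to'][-1] != default_name]
            if real:
                return False, 'Children %s still exist' % ', '.join(i['href'] for i in real)
        for field in back_ref_fields:
            infos = read_result.get(field) or []
            if infos:
                return False, 'Back-References from %s still exist' % ', '.join(i['href'] for i in infos)
        return True, None

    ok, msg = check_deletable(
        {'virtual_machine_interface_back_refs': [{'href': 'http://api/vmi/1', 'to': ['x']}]},
        children_fields=['virtual_machine_interfaces'],
        back_ref_fields=['virtual_machine_interface_back_refs'],
        default_name='default-virtual-machine-interface')
    assert not ok and 'still exist' in msg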
-
- def virtual_machines_http_post(self):
- key = 'virtual-machine'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_machine_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'virtual-machine', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'virtual_machine', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # Validate perms
- objtype_list = ['virtual_machine_interface', u'service_instance']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('virtual-machine', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'virtual_machine', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['virtual_machine', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-machine')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_machine', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('virtual-machine', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_machine', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('virtual-machine', obj_ids['uuid'])
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_machine_create', obj_dict)
- except Exception as e:
- pass
-
- return {'virtual-machine': rsp_body}
- #end virtual_machines_http_post
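The create path allocates and stores the uuid/fq_name mapping (dbe_alloc) before writing the object itself (dbe_create); as the in-line comment notes, a pubsub subscriber could otherwise ask to resolve an id that has not been stored yet. A toy sketch of that ordering with explicit mapping and object tables; create_object, id_map and objects are illustrative names only:

    import uuid as uuid_mod

    # Sketch: the id mapping is persisted before the object so that lookups
    # triggered by create notifications can always resolve the new id.
    id_map, objects = {}, {}

    def create_object(res_type, obj_dict):
        new_uuid = obj_dict.get('uuid') or str(uuid_mod.uuid4())
        id_map[new_uuid] = (res_type, obj_dict['fq_name'])    # step 1: alloc/store mapping
        try:
            objects[new_uuid] = dict(obj_dict, uuid=new_uuid) # step 2: create the object
        except Exception:
            id_map.pop(new_uuid, None)                        # compensate if create fails
            raise
        return new_uuid

    vm_id = create_object('virtual-machine', {'fq_name': ['default-project', 'vm1']})
    assert id_map[vm_id][1] == ['default-project', 'vm1']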
-
- def virtual_machines_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'virtual_machines', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('virtual-machine', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'virtual_machines', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'virtual-machines': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('virtual-machine', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('virtual-machine', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('virtual-machine', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms', u'display_name'] + [u'service_instance_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('virtual-machine', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('virtual-machine', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'virtual-machine': obj_dict})
-
- return {'virtual-machines': obj_dicts}
- #end virtual_machines_http_get
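Collection GETs accept a filters query parameter of the form name1==value1,name2==value2, which the handler splits into parallel field_names/field_values lists for dbe_list and rejects with 400 when malformed. A standalone parser equivalent to that snippet (a sketch: the real handler simply indexes the '==' split and leaves value decoding to the DB layer):

    # Sketch of the 'filters' query-string parser used by the collection handlers.
    def parse_filters(filter_params):
        if not filter_params:
            return None
        try:
            pairs = [ff.split('==') for ff in filter_params.split(',')]
            return {'field_names':  [name for name, _value in pairs],
                    'field_values': [value for _name, value in pairs]}
        except ValueError:
            raise ValueError('Invalid filter ' + filter_params)

    assert parse_filters('display_name==vm1,uuid==u1') == {
        'field_names': ['display_name', 'uuid'],
        'field_values': ['vm1', 'u1']}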
-
- def _virtual_machine_create_default_children(self, parent_obj):
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('virtual-machine-interface')
- if r_class and r_class.generate_default_instance:
- child_obj = VirtualMachineInterface(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('virtual-machine-interface')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('virtual-machine-interface', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('virtual-machine-interface', obj_ids, child_dict)
- self._virtual_machine_interface_create_default_children(child_obj)
-
- pass
- #end _virtual_machine_create_default_children
-
- def _virtual_machine_delete_default_children(self, parent_dict):
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('virtual-machine-interface')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('virtual_machine_interfaces')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-virtual-machine-interface':
- default_child_id = has_info['href'].split('/')[-1]
- self.virtual_machine_interface_http_delete(default_child_id)
- break
-
- pass
- #end _virtual_machine_delete_default_children
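Types whose resource class sets generate_default_instance get a default-<child-type> child created right after the parent, and the matching _<type>_delete_default_children helper locates that child by the last element of its fq_name and deletes it before the parent delete continues. A sketch of the locate-and-delete half; delete_child is a hypothetical callback standing in for the child's http_delete handler:

    # Sketch of _<type>_delete_default_children: find the auto-created child
    # (recognised by its fq_name ending in 'default-<child type>') and delete it.
    def delete_default_child(parent_dict, children_field, default_name, delete_child):
        for has_info in parent_dict.get(children_field) or []:
            if has_info['to'][-1] == default_name:
                child_uuid = has_info['href'].split('/')[-1]   # href ends with the uuid
                delete_child(child_uuid)
                break

    deleted = []
    delete_default_child(
        {'virtual_machine_interfaces': [
            {'to': ['p', 'vm1', 'default-virtual-machine-interface'],
             'href': 'http://api/virtual-machine-interface/abcd-1234'}]},
        'virtual_machine_interfaces',
        'default-virtual-machine-interface',
        deleted.append)
    assert deleted == ['abcd-1234']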
-
- def interface_route_table_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_interface_route_table_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'interface_route_table':
- abort(404, 'No interface-route-table object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'interface_route_table', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('interface-route-table')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'interface_route_table', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'interface_route_table_routes', u'id_perms', u'display_name']
- references = []
- back_references = [u'project_back_refs', 'virtual_machine_interface_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('interface-route-table', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'interface_route_table', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'interface_route_table', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('interface-route-table', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_interface_route_table_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'interface-route-table': rsp_body}
- #end interface_route_table_http_get
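Each read handler assembles its dbe_read field list from four generated sets (properties, references, back_references, children): an explicit fields query overrides everything, while exclude_back_refs and exclude_children trim the default. The same selection logic as a small function; the query dict is a stand-in for Bottle's request.query:

    # Sketch of the per-type field-list construction used before dbe_read.
    def build_obj_fields(query, properties, references, back_references, children):
        if 'fields' in query:
            return query['fields'].split(',')          # caller picked exact fields
        obj_fields = list(properties) + list(references)
        if 'exclude_back_refs' not in query:
            obj_fields += list(back_references)
        if 'exclude_children' not in query:
            obj_fields += list(children)
        return obj_fields

    assert build_obj_fields({}, ['id_perms'], [], ['project_back_refs'], []) == \
        ['id_perms', 'project_back_refs']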
-
- def interface_route_table_http_put(self, id):
- key = 'interface-route-table'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_interface_route_table_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'interface_route_table':
- abort(404, 'No interface-route-table object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('interface_route_table_routes')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_interface_route_table_routes = RouteTableType(**prop_dict)
- xx_interface_route_table_routes.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_interface_route_table_routes = RouteTableType()
- try:
- xx_interface_route_table_routes.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'interface_route_table', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'interface_route_table', 'http_put', msg)
- abort(code, msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('interface-route-table')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'interface_route_table', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('interface-route-table', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'interface_route_table', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('interface-route-table', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_interface_route_table_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'interface-route-table': rsp_body}
- #end interface_route_table_http_put
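Complex properties arriving as JSON (interface_route_table_routes, id_perms and so on) are validated by round-tripping them through the generated XSD types: build the type from the dict, export it to XML, parse that XML back with build(), and abort with 400 if any step fails. A minimal sketch of the same idea using a deliberately trivial hand-written type; RouteTableType and IdPermsType come from the generated bindings and are not reproduced here:

    import io
    import xml.etree.ElementTree as etree

    # Toy stand-in for a generated XSD type: export to XML, then rebuild from XML.
    # (The handlers above use cStringIO under Python 2; io.StringIO is used here.)
    class ToyType(object):
        def __init__(self, **kwargs):
            self.values = kwargs
        def export(self, buf):
            buf.write('<toy>%s</toy>' % ','.join('%s=%s' % kv for kv in sorted(self.values.items())))
        def build(self, node):
            self.values = dict(item.split('=') for item in node.text.split(',')) if node.text else {}

    def validate_prop(prop_dict, prop_cls):
        """Round-trip validation in the style of the generated put/post handlers."""
        buf = io.StringIO()
        prop_cls(**prop_dict).export(buf)
        node = etree.fromstring(buf.getvalue())
        rebuilt = prop_cls()
        rebuilt.build(node)                       # raises on malformed content -> 400 upstream
        return rebuilt

    validate_prop({'enable': 'true'}, ToyType)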
-
- def interface_route_table_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'interface_route_table':
- abort(404, 'No interface-route-table object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_interface_route_table_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'project_back_refs', 'virtual_machine_interface_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('interface-route-table', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'interface_route_table', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'interface_route_table', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'interface_route_table', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('interface-route-table', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('interface-route-table')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- virtual_machine_interface_back_refs = read_result.get('virtual_machine_interface_back_refs', None)
- if virtual_machine_interface_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_machine_interface_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'interface_route_table', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._interface_route_table_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'interface_route_table', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('interface-route-table', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'interface_route_table', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_interface_route_table_delete', id, read_result)
- except Exception as e:
- pass
-
- #end interface_route_table_http_delete
-
- def interface_route_tables_http_post(self):
- key = 'interface-route-table'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_interface_route_table_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('interface_route_table_routes')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_interface_route_table_routes = RouteTableType(**prop_dict)
- xx_interface_route_table_routes.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_interface_route_table_routes = RouteTableType()
- try:
- xx_interface_route_table_routes.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'interface-route-table', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'interface_route_table', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'interface_route_table', 'http_post', err_msg)
- abort(400, err_msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('interface-route-table', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'interface_route_table', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['interface_route_table', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('interface-route-table')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'interface_route_table', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('interface-route-table', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'interface_route_table', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('interface-route-table', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_interface_route_table_create', obj_dict)
- except Exception as e:
- pass
-
- return {'interface-route-table': rsp_body}
- #end interface_route_tables_http_post
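For child resources the create handler drops a parent_type of config-root, otherwise resolves the parent from fq_name[:-1], checks write permission on it, and answers 400 if the parent does not exist; the response later echoes parent_uuid and parent_href. A sketch of that parent-resolution step with a plain dict as the fq_name index; NoIdError here is a simplified stand-in for the exception class imported from cfgm_common:

    # Sketch of the parent check performed by the generated create handlers.
    class NoIdError(Exception):
        pass

    def resolve_parent(obj_dict, fq_name_index, can_write=lambda _uuid: True):
        parent_type = obj_dict.get('parent_type')
        if parent_type == 'config-root':          # implicit root parent: nothing to verify
            obj_dict.pop('parent_type')
            return None
        if not parent_type:
            return None
        parent_fq_name = tuple(obj_dict['fq_name'][:-1])
        try:
            parent_uuid = fq_name_index[(parent_type, parent_fq_name)]
        except KeyError:
            raise NoIdError('Parent %s type %s does not exist' % (list(parent_fq_name), parent_type))
        if not can_write(parent_uuid):
            raise PermissionError('write to parent %s denied' % parent_uuid)
        return parent_uuid

    index = {('project', ('default-domain', 'default-project')): 'p-uuid-1'}
    assert resolve_parent({'parent_type': 'project',
                           'fq_name': ['default-domain', 'default-project', 'rt1']},
                          index) == 'p-uuid-1'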
-
- def interface_route_tables_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'interface_route_tables', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('interface-route-table', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'interface_route_tables', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'interface-route-tables': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('interface-route-table', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('interface-route-table', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('interface-route-table', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'interface_route_table_routes', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('interface-route-table', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('interface-route-table', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'interface-route-table': obj_dict})
-
- return {'interface-route-tables': obj_dicts}
- #end interface_route_tables_http_get
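The boolean query flags count and detail are parsed permissively: the handler lowercases the raw value and treats any string containing 'true' as true, so detail=True and detail=true both enable detail mode. A one-line helper capturing that behaviour (a sketch, not an API the server exposes):

    # Sketch of how the handlers parse the boolean 'count'/'detail' query flags.
    def query_flag(query, name):
        return name in query and 'true' in query[name].lower()

    assert query_flag({'detail': 'True'}, 'detail') is True
    assert query_flag({'detail': '0'}, 'detail') is False
    assert query_flag({}, 'detail') is False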
-
- def _interface_route_table_create_default_children(self, parent_obj):
- pass
- #end _interface_route_table_create_default_children
-
- def _interface_route_table_delete_default_children(self, parent_dict):
- pass
- #end _interface_route_table_delete_default_children
-
- def service_template_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_service_template_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'service_template':
- abort(404, 'No service-template object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'service_template', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('service-template')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'service_template', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'service_template_properties', u'id_perms', u'display_name']
- references = []
- back_references = [u'domain_back_refs', u'service_instance_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('service-template', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'service_template', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'service_template', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('service-template', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_service_template_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'service-template': rsp_body}
- #end service_template_http_get
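Every handler brackets its work with optional extension hooks: map_method('pre_<type>_<op>', ...) before the operation and the matching post_ hook afterwards, with any exception raised by an extension swallowed so a misbehaving plugin cannot fail the request. A sketch of that dispatch using a plain dict of callables; the real server drives a stevedore extension manager, which is not reproduced here:

    # Sketch of the pre/post extension hook dispatch; hooks are best-effort and
    # never allowed to fail the request (exceptions are deliberately swallowed).
    def map_method(hooks, name, *args, **kwargs):
        for hook in hooks.get(name, []):
            try:
                hook(*args, **kwargs)
            except Exception:
                pass                               # a broken extension must not break the API

    calls = []
    hooks = {'pre_virtual_machine_read': [calls.append,
                                          lambda _id: 1 / 0]}  # second hook blows up, is ignored
    map_method(hooks, 'pre_virtual_machine_read', 'some-uuid')
    assert calls == ['some-uuid']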
-
- def service_template_http_put(self, id):
- key = 'service-template'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_service_template_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'service_template':
- abort(404, 'No service-template object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('service_template_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_service_template_properties = ServiceTemplateType(**prop_dict)
- xx_service_template_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_service_template_properties = ServiceTemplateType()
- try:
- xx_service_template_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'service_template', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'service_template', 'http_put', msg)
- abort(code, msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('service-template')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'service_template', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('service-template', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'service_template', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('service-template', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_service_template_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'service-template': rsp_body}
- #end service_template_http_put
-
- def service_template_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'service_template':
- abort(404, 'No service-template object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_service_template_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'domain_back_refs', u'service_instance_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('service-template', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'service_template', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'service_template', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'service_template', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('service-template', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('service-template')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- service_instance_back_refs = read_result.get('service_instance_back_refs', None)
- if service_instance_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['service_instance_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'service_template', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._service_template_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'service_template', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('service-template', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'service_template', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_service_template_delete', id, read_result)
- except Exception as e:
- pass
-
- #end service_template_http_delete
-
- def service_templates_http_post(self):
- key = 'service-template'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_service_template_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('service_template_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_service_template_properties = ServiceTemplateType(**prop_dict)
- xx_service_template_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_service_template_properties = ServiceTemplateType()
- try:
- xx_service_template_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'service-template', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'service_template', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'service_template', 'http_post', err_msg)
- abort(400, err_msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('service-template', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'service_template', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['service_template', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('service-template')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'service_template', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('service-template', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'service_template', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('service-template', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_service_template_create', obj_dict)
- except Exception as e:
- pass
-
- return {'service-template': rsp_body}
- #end service_templates_http_post
-
- def service_templates_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'service_templates', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('service-template', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'service_templates', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'service-templates': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('service-template', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('service-template', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('service-template', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'service_template_properties', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('service-template', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('service-template', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'service-template': obj_dict})
-
- return {'service-templates': obj_dicts}
- #end service_templates_http_get
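A collection GET can be anchored three ways: by parent (either parent_fq_name_str plus parent_type, or a comma-separated parent_id list), by back_ref_id, or by an explicit obj_uuids list; id lists are normalised through uuid.UUID so malformed ids fail early with an exception. A sketch of that anchor extraction; fq_name_to_uuid stands in for the DB lookup:

    import uuid as uuid_mod

    # Sketch of the anchor extraction done at the top of the collection handlers.
    def extract_anchors(query, fq_name_to_uuid):
        parent_uuids = back_ref_uuids = obj_uuids = None
        if 'parent_fq_name_str' in query and 'parent_type' in query:
            fq_name = query['parent_fq_name_str'].split(':')
            parent_uuids = [fq_name_to_uuid(query['parent_type'], fq_name)]
        elif 'parent_id' in query:
            parent_uuids = [str(uuid_mod.UUID(u)) for u in query['parent_id'].split(',')]
        if 'back_ref_id' in query:
            back_ref_uuids = [str(uuid_mod.UUID(u)) for u in query['back_ref_id'].split(',')]
        if 'obj_uuids' in query:
            obj_uuids = query['obj_uuids'].split(',')
        return parent_uuids, back_ref_uuids, obj_uuids

    anchors = extract_anchors({'parent_id': '12345678-1234-1234-1234-123456789012'}, None)
    assert anchors[0] == ['12345678-1234-1234-1234-123456789012']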
-
- def _service_template_create_default_children(self, parent_obj):
- pass
- #end _service_template_create_default_children
-
- def _service_template_delete_default_children(self, parent_dict):
- pass
- #end _service_template_delete_default_children
-
- def virtual_ip_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_ip_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'virtual_ip':
- abort(404, 'No virtual-ip object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'virtual_ip', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-ip')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'virtual_ip', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'virtual_ip_properties', u'id_perms', u'display_name']
- references = [u'loadbalancer_pool_refs', 'virtual_machine_interface_refs']
- back_references = [u'project_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('virtual-ip', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'virtual_ip', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'virtual_ip', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('virtual-ip', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_ip_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'virtual-ip': rsp_body}
- #end virtual_ip_http_get
-
- def virtual_ip_http_put(self, id):
- key = 'virtual-ip'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_ip_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'virtual_ip':
- abort(404, 'No virtual-ip object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('virtual_ip_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_ip_properties = VirtualIpType(**prop_dict)
- xx_virtual_ip_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_ip_properties = VirtualIpType()
- try:
- xx_virtual_ip_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'virtual_ip', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'virtual_ip', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = [u'loadbalancer_pool', 'virtual_machine_interface']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('virtual-ip')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'virtual_ip', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('virtual-ip', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'virtual_ip', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('virtual-ip', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_ip_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'virtual-ip': rsp_body}
- #end virtual_ip_http_put
-
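The PUT handler validates virtual_ip_properties and id_perms by round-tripping them through the generated XML types: the dict is expanded into the type, exported to XML, re-parsed, and rebuilt, so malformed input fails with 400 before any state is touched. A condensed sketch of that round-trip under a hypothetical stand-in type (DemoPropType below is illustrative only; the real handlers use VirtualIpType and IdPermsType together with the module's own etree import):

    import cStringIO                       # the handlers above use cStringIO the same way
    import xml.etree.ElementTree as etree  # stdlib stand-in for the module's etree import

    class DemoPropType(object):
        """Hypothetical stand-in for a generated property type such as VirtualIpType."""
        def __init__(self, address=None, port=None):
            self.address = address
            self.port = port

        def export(self, buf):
            # serialise to a small XML document, as the generated export() methods do
            buf.write('<demo><address>%s</address><port>%s</port></demo>'
                      % (self.address, self.port))

        def build(self, node):
            # repopulate from parsed XML; bad input surfaces here as an exception
            self.address = node.findtext('address')
            self.port = int(node.findtext('port'))

    prop_dict = {'address': '10.0.0.5', 'port': 80}
    buf = cStringIO.StringIO()
    DemoPropType(**prop_dict).export(buf)        # dict -> XML text
    node = etree.fromstring(buf.getvalue())      # XML text -> element tree
    validated = DemoPropType()
    validated.build(node)                        # element tree -> typed object, or abort(400)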
- def virtual_ip_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'virtual_ip':
- abort(404, 'No virtual-ip object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_ip_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'project_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('virtual-ip', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'virtual_ip', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'virtual_ip', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'virtual_ip', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('virtual-ip', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-ip')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
-
- # Delete default children first
- self._virtual_ip_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'virtual_ip', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('virtual-ip', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'virtual_ip', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_ip_delete', id, read_result)
- except Exception as e:
- pass
-
- #end virtual_ip_http_delete
-
- def virtual_ips_http_post(self):
- key = 'virtual-ip'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_ip_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('virtual_ip_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_ip_properties = VirtualIpType(**prop_dict)
- xx_virtual_ip_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_ip_properties = VirtualIpType()
- try:
- xx_virtual_ip_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'virtual-ip', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'virtual_ip', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_ip', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = [u'loadbalancer_pool', 'virtual_machine_interface']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('virtual-ip', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'virtual_ip', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['virtual_ip', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-ip')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_ip', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('virtual-ip', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_ip', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('virtual-ip', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_ip_create', obj_dict)
- except Exception as e:
- pass
-
- return {'virtual-ip': rsp_body}
- #end virtual_ips_http_post
-
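From the point marked "State modification starts from here", the create handler pairs every successful step with an undo entry in cleanup_on_failure and replays the accumulated list on the first failure before aborting. A generic sketch of that compensation pattern, with illustrative step names that are not part of this API:

    def apply_with_compensation(steps):
        """steps: list of (do_callable, undo) pairs, where undo is either None or a
        (callable, args) tuple.  Each do_callable returns (ok, result); the first
        failure replays every previously registered undo, mirroring the
        cleanup_on_failure list in the handlers above."""
        cleanup_on_failure = []
        result = None
        for do, undo in steps:
            (ok, result) = do()
            if not ok:
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                return False, result
            if undo:
                cleanup_on_failure.append(undo)
        return True, result

    # illustrative usage: the second step fails, so the first step's undo runs
    log = []
    steps = [
        (lambda: (True, 'id allocated'), (log.append, ['release id'])),
        (lambda: (False, 'db create failed'), None),
    ]
    (ok, result) = apply_with_compensation(steps)
    assert ok is False and log == ['release id']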
- def virtual_ips_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'virtual_ips', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('virtual-ip', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'virtual_ips', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'virtual-ips': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('virtual-ip', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('virtual-ip', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('virtual-ip', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'virtual_ip_properties', u'id_perms', u'display_name'] + [u'loadbalancer_pool_refs', 'virtual_machine_interface_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('virtual-ip', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('virtual-ip', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'virtual-ip': obj_dict})
-
- return {'virtual-ips': obj_dicts}
- #end virtual_ips_http_get
-
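The collection GET above recognises parent_fq_name_str plus parent_type, or parent_id, to anchor the listing; back_ref_id and obj_uuids to narrow it; count=true for a count-only reply; detail=true for full objects; fields for extra attributes; and filters in field==value,field==value form. A small client-side sketch, again assuming a local API server (endpoint and uuid are placeholders):

    import requests

    BASE = 'http://localhost:8082'    # assumed API-server endpoint
    PROJECT_UUID = '11111111-1111-1111-1111-111111111111'   # placeholder parent uuid

    # count-only listing: response is {'virtual-ips': {'count': N}}
    rsp = requests.get('%s/virtual-ips' % BASE,
                       params={'parent_id': PROJECT_UUID, 'count': 'true'})

    # detailed listing, one extra field, filtered in the field==value form
    rsp = requests.get('%s/virtual-ips' % BASE,
                       params={'parent_id': PROJECT_UUID,
                               'detail': 'true',
                               'fields': 'virtual_machine_interface_refs',
                               'filters': 'display_name==web-vip'})
    for entry in rsp.json().get('virtual-ips', []):
        print(entry['virtual-ip']['uuid'])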
- def _virtual_ip_create_default_children(self, parent_obj):
- pass
- #end _virtual_ip_create_default_children
-
- def _virtual_ip_delete_default_children(self, parent_dict):
- pass
- #end _virtual_ip_delete_default_children
-
- def loadbalancer_member_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_member_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'loadbalancer_member':
- abort(404, 'No loadbalancer-member object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'loadbalancer_member', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('loadbalancer-member')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'loadbalancer_member', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'loadbalancer_member_properties', u'id_perms', u'display_name']
- references = []
- back_references = [u'loadbalancer_pool_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('loadbalancer-member', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'loadbalancer_member', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'loadbalancer_member', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('loadbalancer-member', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_member_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'loadbalancer-member': rsp_body}
- #end loadbalancer_member_http_get
-
- def loadbalancer_member_http_put(self, id):
- key = 'loadbalancer-member'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_member_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'loadbalancer_member':
- abort(404, 'No loadbalancer-member object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('loadbalancer_member_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_loadbalancer_member_properties = LoadbalancerMemberType(**prop_dict)
- xx_loadbalancer_member_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_loadbalancer_member_properties = LoadbalancerMemberType()
- try:
- xx_loadbalancer_member_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'loadbalancer_member', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'loadbalancer_member', 'http_put', msg)
- abort(code, msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('loadbalancer-member')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'loadbalancer_member', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('loadbalancer-member', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'loadbalancer_member', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('loadbalancer-member', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_member_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'loadbalancer-member': rsp_body}
- #end loadbalancer_member_http_put
-
- def loadbalancer_member_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'loadbalancer_member':
- abort(404, 'No loadbalancer-member object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_member_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'loadbalancer_pool_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('loadbalancer-member', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'loadbalancer_member', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'loadbalancer_member', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'loadbalancer_member', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('loadbalancer-member', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('loadbalancer-member')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
-
- # Delete default children first
- self._loadbalancer_member_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'loadbalancer_member', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('loadbalancer-member', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'loadbalancer_member', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_member_delete', id, read_result)
- except Exception as e:
- pass
-
- #end loadbalancer_member_http_delete
-
- def loadbalancer_members_http_post(self):
- key = 'loadbalancer-member'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_member_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('loadbalancer_member_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_loadbalancer_member_properties = LoadbalancerMemberType(**prop_dict)
- xx_loadbalancer_member_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_loadbalancer_member_properties = LoadbalancerMemberType()
- try:
- xx_loadbalancer_member_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'loadbalancer-member', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'loadbalancer_member', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'loadbalancer_member', 'http_post', err_msg)
- abort(400, err_msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('loadbalancer-member', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'loadbalancer_member', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['loadbalancer_member', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('loadbalancer-member')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'loadbalancer_member', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('loadbalancer-member', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'loadbalancer_member', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('loadbalancer-member', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_member_create', obj_dict)
- except Exception as e:
- pass
-
- return {'loadbalancer-member': rsp_body}
- #end loadbalancer_members_http_post
-
- def loadbalancer_members_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'loadbalancer_members', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('loadbalancer-member', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'loadbalancer_members', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'loadbalancer-members': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('loadbalancer-member', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('loadbalancer-member', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('loadbalancer-member', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'loadbalancer_member_properties', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('loadbalancer-member', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('loadbalancer-member', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'loadbalancer-member': obj_dict})
-
- return {'loadbalancer-members': obj_dicts}
- #end loadbalancer_members_http_get
-
- def _loadbalancer_member_create_default_children(self, parent_obj):
- pass
- #end _loadbalancer_member_create_default_children
-
- def _loadbalancer_member_delete_default_children(self, parent_dict):
- pass
- #end _loadbalancer_member_delete_default_children
-
- def security_group_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_security_group_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'security_group':
- abort(404, 'No security-group object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'security_group', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('security-group')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'security_group', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'security_group_id', u'configured_security_group_id', u'security_group_entries', u'id_perms', u'display_name']
- references = []
- back_references = [u'project_back_refs', 'virtual_machine_interface_back_refs']
- children = [u'access_control_lists']
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('security-group', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'security_group', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'security_group', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('security-group', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_security_group_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'security-group': rsp_body}
- #end security_group_http_get
-
- def security_group_http_put(self, id):
- key = 'security-group'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_security_group_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'security_group':
- abort(404, 'No security-group object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('security_group_entries')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_security_group_entries = PolicyEntriesType(**prop_dict)
- xx_security_group_entries.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_security_group_entries = PolicyEntriesType()
- try:
- xx_security_group_entries.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'security_group', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'security_group', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = [u'access_control_list']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('security-group')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'security_group', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('security-group', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'security_group', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('security-group', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_security_group_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'security-group': rsp_body}
- #end security_group_http_put
-
- def security_group_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'security_group':
- abort(404, 'No security-group object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_security_group_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'project_back_refs', 'virtual_machine_interface_back_refs']
- children = [u'access_control_lists']
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('security-group', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'security_group', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'security_group', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'security_group', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('security-group', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('security-group')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- virtual_machine_interface_back_refs = read_result.get('virtual_machine_interface_back_refs', None)
- if virtual_machine_interface_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_machine_interface_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'security_group', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._security_group_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'security_group', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('security-group', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'security_group', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_security_group_delete', id, read_result)
- except Exception as e:
- pass
-
- #end security_group_http_delete
-
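security_group_http_delete is the one delete handler in this span with a non-trivial guard: if any virtual-machine-interface still back-references the group, the request is refused with 409 and the error message lists the referring hrefs. A hedged client-side sketch of handling that outcome (endpoint and uuid are placeholders):

    import requests

    BASE = 'http://localhost:8082'    # assumed API-server endpoint
    SG_UUID = '22222222-2222-2222-2222-222222222222'   # placeholder security-group uuid

    rsp = requests.delete('%s/security-group/%s' % (BASE, SG_UUID))
    if rsp.status_code == 409:
        # the body names the virtual-machine-interface hrefs that still refer to
        # the group; detach or delete those interfaces first, then retry
        print(rsp.text)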
- def security_groups_http_post(self):
- key = 'security-group'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_security_group_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('security_group_entries')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_security_group_entries = PolicyEntriesType(**prop_dict)
- xx_security_group_entries.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_security_group_entries = PolicyEntriesType()
- try:
- xx_security_group_entries.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'security-group', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'security_group', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'security_group', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = [u'access_control_list']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('security-group', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'security_group', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['security_group', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('security-group')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'security_group', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('security-group', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'security_group', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('security-group', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_security_group_create', obj_dict)
- except Exception as e:
- pass
-
- return {'security-group': rsp_body}
- #end security_groups_http_post
-
- def security_groups_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'security_groups', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('security-group', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'security_groups', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'security-groups': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('security-group', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('security-group', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('security-group', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'security_group_id', u'configured_security_group_id', u'security_group_entries', u'id_perms', u'display_name'] + []
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('security-group', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('security-group', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'security-group': obj_dict})
-
- return {'security-groups': obj_dicts}
- #end security_groups_http_get
-
- def _security_group_create_default_children(self, parent_obj):
- pass
- #end _security_group_create_default_children
-
- def _security_group_delete_default_children(self, parent_dict):
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('virtual-machine-interface')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('access_control_lists')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-access-control-list':
- default_child_id = has_info['href'].split('/')[-1]
- self.access_control_list_http_delete(default_child_id)
- break
-
- pass
- #end _security_group_delete_default_children
-
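_security_group_delete_default_children also shows the shape the DB layer uses for child listings: each entry carries the child's fq_name under 'to' and its REST href, and the default child is picked out purely by the trailing fq_name component. A self-contained illustration of that lookup on a hand-built parent dict (all values are placeholders):

    parent_dict = {
        'access_control_lists': [
            {'to': ['default-domain', 'demo', 'web-sg', 'ingress-access-control-list'],
             'href': 'http://localhost:8082/access-control-list/aaaa-1111'},
            {'to': ['default-domain', 'demo', 'web-sg', 'default-access-control-list'],
             'href': 'http://localhost:8082/access-control-list/bbbb-2222'},
        ]
    }

    default_child_id = None
    for has_info in parent_dict.get('access_control_lists') or []:
        if has_info['to'][-1] == 'default-access-control-list':
            # the child's uuid is the last path component of its href
            default_child_id = has_info['href'].split('/')[-1]
            break

    print(default_child_id)    # -> 'bbbb-2222'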
- def provider_attachment_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_provider_attachment_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'provider_attachment':
- abort(404, 'No provider-attachment object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'provider_attachment', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('provider-attachment')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'provider_attachment', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'id_perms', u'display_name']
- references = ['virtual_router_refs']
- back_references = ['customer_attachment_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('provider-attachment', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'provider_attachment', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'provider_attachment', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('provider-attachment', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_provider_attachment_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'provider-attachment': rsp_body}
- #end provider_attachment_http_get
-
- def provider_attachment_http_put(self, id):
- key = 'provider-attachment'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_provider_attachment_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'provider_attachment':
- abort(404, 'No provider-attachment object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'provider_attachment', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'provider_attachment', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = ['virtual_router']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('provider-attachment')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'provider_attachment', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('provider-attachment', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'provider_attachment', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('provider-attachment', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_provider_attachment_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'provider-attachment': rsp_body}
- #end provider_attachment_http_put
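
Every mutating handler here follows the same compensation pattern: each state change registers a matching undo callable in cleanup_on_failure, and a later failure replays those callables before aborting. A small self-contained sketch of the pattern; the step functions are hypothetical placeholders, not code from this tree:

    def run_with_cleanup(steps):
        # steps is a list of (do, undo, args) tuples. On the first failed
        # step, replay the undo callables registered so far, mirroring the
        # cleanup_on_failure lists in the handlers above.
        cleanup_on_failure = []
        for do, undo, args in steps:
            if not do(*args):
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                return False
            if undo is not None:
                cleanup_on_failure.append((undo, args))
        return True

    # Hypothetical usage: the second step fails, so the first step's undo runs.
    run_with_cleanup([
        (lambda name: True,  lambda name: None, ['alloc-id']),
        (lambda name: False, None,              ['store-object']),
    ])
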
-
- def provider_attachment_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'provider_attachment':
- abort(404, 'No provider-attachment object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_provider_attachment_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = ['customer_attachment_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('provider-attachment', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'provider_attachment', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'provider_attachment', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'provider_attachment', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('provider-attachment', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('provider-attachment')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
-
- # Delete default children first
- self._provider_attachment_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'provider_attachment', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('provider-attachment', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'provider_attachment', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_provider_attachment_delete', id, read_result)
- except Exception as e:
- pass
-
- #end provider_attachment_http_delete
-
- def provider_attachments_http_post(self):
- key = 'provider-attachment'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_provider_attachment_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'provider-attachment', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'provider_attachment', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # Validate perms
- objtype_list = ['virtual_router']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-        # Allocate and store id-mappings before creating the entry on the pubsub store;
-        # else a subscriber can ask for an id mapping before we have stored it.
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('provider-attachment', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'provider_attachment', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['provider_attachment', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('provider-attachment')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'provider_attachment', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('provider-attachment', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'provider_attachment', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('provider-attachment', obj_ids['uuid'])
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_provider_attachment_create', obj_dict)
- except Exception as e:
- pass
-
- return {'provider-attachment': rsp_body}
- #end provider_attachments_http_post
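
Seen from the client side, the collection POST expects the object wrapped under its hyphenated type name and answers with name, fq_name, uuid and href. A hedged usage sketch with the requests library; the server URL, the plural route and the fq_name value are placeholders assumed here, not taken from this tree:

    import requests  # any HTTP client works; requests is assumed available

    API = 'http://localhost:8082'  # placeholder API-server endpoint
    body = {'provider-attachment': {
        'fq_name': ['example-provider-attachment'],  # hypothetical name
    }}
    resp = requests.post(API + '/provider-attachments', json=body)
    if resp.ok:
        created = resp.json()['provider-attachment']
        new_uuid = created['uuid']
        new_href = created['href']
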
-
- def provider_attachments_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'provider_attachments', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('provider-attachment', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'provider_attachments', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'provider-attachments': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('provider-attachment', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('provider-attachment', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('provider-attachment', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms', u'display_name'] + ['virtual_router_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('provider-attachment', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('provider-attachment', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'provider-attachment': obj_dict})
-
- return {'provider-attachments': obj_dicts}
- #end provider_attachments_http_get
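
The collection GET accepts a filters query parameter of the form 'name1==value1,name2==value2', which the handler splits into parallel field_names/field_values lists and hands to dbe_list, turning any parse error into a 400. A standalone sketch of that parsing step; the function name is ours:

    def parse_filters(filter_params):
        # Parse 'f1==v1,f2==v2' into the dict shape passed to dbe_list.
        # A malformed pair raises ValueError, which the handler maps to 400.
        if not filter_params:
            return None
        ff_names, ff_values = [], []
        for pair in filter_params.split(','):
            name, sep, value = pair.partition('==')
            if not sep:
                raise ValueError('Invalid filter ' + filter_params)
            ff_names.append(name)
            ff_values.append(value)
        return {'field_names': ff_names, 'field_values': ff_values}

    parse_filters('display_name==web-port')  # hypothetical filter
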
-
- def _provider_attachment_create_default_children(self, parent_obj):
- pass
- #end _provider_attachment_create_default_children
-
- def _provider_attachment_delete_default_children(self, parent_dict):
- pass
- #end _provider_attachment_delete_default_children
-
- def virtual_machine_interface_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_machine_interface_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'virtual_machine_interface':
- abort(404, 'No virtual-machine-interface object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-machine-interface')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'virtual_machine_interface_mac_addresses', u'virtual_machine_interface_dhcp_option_list', u'virtual_machine_interface_host_routes', u'virtual_machine_interface_allowed_address_pairs', u'vrf_assign_table', u'virtual_machine_interface_device_owner', u'virtual_machine_interface_properties', u'id_perms', u'display_name']
- references = [u'qos_forwarding_class_refs', u'security_group_refs', 'virtual_machine_interface_refs', u'virtual_machine_refs', u'virtual_network_refs', 'routing_instance_refs', u'interface_route_table_refs']
- back_references = ['virtual_machine_interface_back_refs', u'virtual_machine_back_refs', u'project_back_refs', u'instance_ip_back_refs', u'subnet_back_refs', u'floating_ip_back_refs', u'logical_interface_back_refs', 'customer_attachment_back_refs', u'logical_router_back_refs', u'loadbalancer_pool_back_refs', u'virtual_ip_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('virtual-machine-interface', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('virtual-machine-interface', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_machine_interface_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'virtual-machine-interface': rsp_body}
- #end virtual_machine_interface_http_get
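
Each object GET stamps its response with an ETag derived from id_perms.last_modified and honours If-None-Match, replying 304 Not Modified when the stored copy is still current. A hedged client-side sketch of that read-optimization round trip; the URL and uuid are placeholders:

    import requests  # assumed available

    url = 'http://localhost:8082/virtual-machine-interface/<uuid>'  # placeholder

    first = requests.get(url)
    etag = first.headers.get('ETag')

    # Re-read with the cached ETag; if nothing changed the server answers
    # 304 with an empty body and the cached copy can be reused.
    second = requests.get(url, headers={'If-None-Match': etag})
    if second.status_code == 304:
        vmi = first.json()['virtual-machine-interface']
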
-
- def virtual_machine_interface_http_put(self, id):
- key = 'virtual-machine-interface'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_machine_interface_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'virtual_machine_interface':
- abort(404, 'No virtual-machine-interface object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('virtual_machine_interface_mac_addresses')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_machine_interface_mac_addresses = MacAddressesType(**prop_dict)
- xx_virtual_machine_interface_mac_addresses.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_machine_interface_mac_addresses = MacAddressesType()
- try:
- xx_virtual_machine_interface_mac_addresses.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('virtual_machine_interface_dhcp_option_list')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_machine_interface_dhcp_option_list = DhcpOptionsListType(**prop_dict)
- xx_virtual_machine_interface_dhcp_option_list.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_machine_interface_dhcp_option_list = DhcpOptionsListType()
- try:
- xx_virtual_machine_interface_dhcp_option_list.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('virtual_machine_interface_host_routes')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_machine_interface_host_routes = RouteTableType(**prop_dict)
- xx_virtual_machine_interface_host_routes.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_machine_interface_host_routes = RouteTableType()
- try:
- xx_virtual_machine_interface_host_routes.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('virtual_machine_interface_allowed_address_pairs')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_machine_interface_allowed_address_pairs = AllowedAddressPairs(**prop_dict)
- xx_virtual_machine_interface_allowed_address_pairs.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_machine_interface_allowed_address_pairs = AllowedAddressPairs()
- try:
- xx_virtual_machine_interface_allowed_address_pairs.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('vrf_assign_table')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_vrf_assign_table = VrfAssignTableType(**prop_dict)
- xx_vrf_assign_table.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_vrf_assign_table = VrfAssignTableType()
- try:
- xx_vrf_assign_table.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('virtual_machine_interface_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_machine_interface_properties = VirtualMachineInterfacePropertiesType(**prop_dict)
- xx_virtual_machine_interface_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_machine_interface_properties = VirtualMachineInterfacePropertiesType()
- try:
- xx_virtual_machine_interface_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- for ref_dict in obj_dict.get('routing_instance_refs') or []:
- buf = cStringIO.StringIO()
- xx_routing_instance = PolicyBasedForwardingRuleType(**ref_dict['attr'])
- xx_routing_instance.export(buf)
- node = etree.fromstring(buf.getvalue())
- try:
- xx_routing_instance.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'virtual_machine_interface', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = [u'qos_forwarding_class', u'security_group', 'virtual_machine_interface', u'virtual_machine', u'virtual_network', 'routing_instance', u'interface_route_table']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('virtual-machine-interface')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('virtual-machine-interface', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('virtual-machine-interface', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_machine_interface_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'virtual-machine-interface': rsp_body}
- #end virtual_machine_interface_http_put
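
Before any state change, the PUT (and POST) handlers round-trip every typed property: rebuild the generated type from the JSON dict, export it to XML, re-parse it and call build(), so a malformed property becomes a 400 instead of reaching the database. A compact sketch of that check as a generic helper; the helper name is ours, it assumes the same cStringIO/etree imports the original module relies on, and type_cls stands for one of the generated types such as IdPermsType:

    import cStringIO               # Python 2, as in the original module
    from lxml import etree         # assumed to match the module's etree import

    def validate_typed_property(type_cls, prop_dict):
        # Round-trip prop_dict through type_cls the way the handlers do,
        # returning an error string instead of letting the exception escape.
        if not prop_dict:
            return None
        buf = cStringIO.StringIO()
        type_cls(**prop_dict).export(buf)
        node = etree.fromstring(buf.getvalue())
        try:
            type_cls().build(node)
        except Exception as e:
            return str(e)          # the handler would abort(400, str(e))
        return None
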
-
- def virtual_machine_interface_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'virtual_machine_interface':
- abort(404, 'No virtual-machine-interface object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_machine_interface_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = ['virtual_machine_interface_back_refs', u'virtual_machine_back_refs', u'project_back_refs', u'instance_ip_back_refs', u'subnet_back_refs', u'floating_ip_back_refs', u'logical_interface_back_refs', 'customer_attachment_back_refs', u'logical_router_back_refs', u'loadbalancer_pool_back_refs', u'virtual_ip_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('virtual-machine-interface', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'virtual_machine_interface', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('virtual-machine-interface', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-machine-interface')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- virtual_machine_interface_back_refs = read_result.get('virtual_machine_interface_back_refs', None)
- if virtual_machine_interface_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_machine_interface_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg)
- abort(409, err_msg)
-
- instance_ip_back_refs = read_result.get('instance_ip_back_refs', None)
- if instance_ip_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['instance_ip_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg)
- abort(409, err_msg)
-
- subnet_back_refs = read_result.get('subnet_back_refs', None)
- if subnet_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['subnet_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg)
- abort(409, err_msg)
-
- floating_ip_back_refs = read_result.get('floating_ip_back_refs', None)
- if floating_ip_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['floating_ip_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg)
- abort(409, err_msg)
-
- logical_interface_back_refs = read_result.get('logical_interface_back_refs', None)
- if logical_interface_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['logical_interface_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg)
- abort(409, err_msg)
-
- customer_attachment_back_refs = read_result.get('customer_attachment_back_refs', None)
- if customer_attachment_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['customer_attachment_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg)
- abort(409, err_msg)
-
- logical_router_back_refs = read_result.get('logical_router_back_refs', None)
- if logical_router_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['logical_router_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg)
- abort(409, err_msg)
-
- loadbalancer_pool_back_refs = read_result.get('loadbalancer_pool_back_refs', None)
- if loadbalancer_pool_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['loadbalancer_pool_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg)
- abort(409, err_msg)
-
- virtual_ip_back_refs = read_result.get('virtual_ip_back_refs', None)
- if virtual_ip_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_ip_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._virtual_machine_interface_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('virtual-machine-interface', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_machine_interface_delete', id, read_result)
- except Exception as e:
- pass
-
- #end virtual_machine_interface_http_delete
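
The delete handler above repeats one guard per back-reference field: if any back-refs remain, their hrefs are joined into an error message and the request is rejected with 409. Those blocks can be read (or regenerated) as a single loop over the back-ref field names; a hedged refactoring sketch, not the generated code itself:

    def check_no_back_refs(read_result, back_ref_fields):
        # Return an error message if any listed back-reference field is still
        # populated; the handler turns this into abort(409, err_msg).
        for field in back_ref_fields:
            back_refs = read_result.get(field)
            if back_refs:
                back_ref_urls = [info['href'] for info in back_refs]
                return 'Back-References from ' + ', '.join(back_ref_urls) + ' still exist'
        return None

    # Example with a hypothetical read result:
    check_no_back_refs(
        {'instance_ip_back_refs': [{'href': 'http://host/instance-ip/1'}]},
        ['instance_ip_back_refs', 'floating_ip_back_refs'])
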
-
- def virtual_machine_interfaces_http_post(self):
- key = 'virtual-machine-interface'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_machine_interface_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('virtual_machine_interface_mac_addresses')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_machine_interface_mac_addresses = MacAddressesType(**prop_dict)
- xx_virtual_machine_interface_mac_addresses.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_machine_interface_mac_addresses = MacAddressesType()
- try:
- xx_virtual_machine_interface_mac_addresses.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('virtual_machine_interface_dhcp_option_list')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_machine_interface_dhcp_option_list = DhcpOptionsListType(**prop_dict)
- xx_virtual_machine_interface_dhcp_option_list.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_machine_interface_dhcp_option_list = DhcpOptionsListType()
- try:
- xx_virtual_machine_interface_dhcp_option_list.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('virtual_machine_interface_host_routes')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_machine_interface_host_routes = RouteTableType(**prop_dict)
- xx_virtual_machine_interface_host_routes.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_machine_interface_host_routes = RouteTableType()
- try:
- xx_virtual_machine_interface_host_routes.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('virtual_machine_interface_allowed_address_pairs')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_machine_interface_allowed_address_pairs = AllowedAddressPairs(**prop_dict)
- xx_virtual_machine_interface_allowed_address_pairs.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_machine_interface_allowed_address_pairs = AllowedAddressPairs()
- try:
- xx_virtual_machine_interface_allowed_address_pairs.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('vrf_assign_table')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_vrf_assign_table = VrfAssignTableType(**prop_dict)
- xx_vrf_assign_table.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_vrf_assign_table = VrfAssignTableType()
- try:
- xx_vrf_assign_table.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('virtual_machine_interface_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_machine_interface_properties = VirtualMachineInterfacePropertiesType(**prop_dict)
- xx_virtual_machine_interface_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_machine_interface_properties = VirtualMachineInterfacePropertiesType()
- try:
- xx_virtual_machine_interface_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- for ref_dict in obj_dict.get('routing_instance_refs') or []:
- buf = cStringIO.StringIO()
- xx_routing_instance = PolicyBasedForwardingRuleType(**ref_dict['attr'])
- xx_routing_instance.export(buf)
- node = etree.fromstring(buf.getvalue())
- try:
- xx_routing_instance.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'virtual-machine-interface', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'virtual_machine_interface', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_machine_interface', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = [u'qos_forwarding_class', u'security_group', 'virtual_machine_interface', u'virtual_machine', u'virtual_network', 'routing_instance', u'interface_route_table']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-        # Allocate and store id-mappings before creating the entry on the pubsub store;
-        # else a subscriber can ask for an id mapping before we have stored it.
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('virtual-machine-interface', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'virtual_machine_interface', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['virtual_machine_interface', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-machine-interface')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_machine_interface', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('virtual-machine-interface', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_machine_interface', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('virtual-machine-interface', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_machine_interface_create', obj_dict)
- except Exception as e:
- pass
-
- return {'virtual-machine-interface': rsp_body}
- #end virtual_machine_interfaces_http_post
-
- def virtual_machine_interfaces_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'virtual_machine_interfaces', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('virtual-machine-interface', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'virtual_machine_interfaces', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'virtual-machine-interfaces': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('virtual-machine-interface', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('virtual-machine-interface', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('virtual-machine-interface', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'virtual_machine_interface_mac_addresses', u'virtual_machine_interface_dhcp_option_list', u'virtual_machine_interface_host_routes', u'virtual_machine_interface_allowed_address_pairs', u'vrf_assign_table', u'virtual_machine_interface_device_owner', u'virtual_machine_interface_properties', u'id_perms', u'display_name'] + [u'qos_forwarding_class_refs', u'security_group_refs', 'virtual_machine_interface_refs', u'virtual_machine_refs', u'virtual_network_refs', 'routing_instance_refs', u'interface_route_table_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('virtual-machine-interface', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('virtual-machine-interface', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'virtual-machine-interface': obj_dict})
-
- return {'virtual-machine-interfaces': obj_dicts}
- #end virtual_machine_interfaces_http_get
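
Depending on the count and detail query flags, the collection GET returns one of three shapes: a bare count, a summary list of uuid/href/fq_name stubs, or full per-object dicts each wrapped again under the type name. The literals below illustrate the three payloads; all values are placeholders:

    count_response = {'virtual-machine-interfaces': {'count': 2}}

    summary_response = {'virtual-machine-interfaces': [
        {'uuid': '<uuid>',
         'href': 'http://localhost:8082/virtual-machine-interface/<uuid>',
         'fq_name': ['default-domain', 'default-project', 'vmi-1']},
    ]}

    # detail=true wraps each full object under its type name again
    detail_response = {'virtual-machine-interfaces': [
        {'virtual-machine-interface': {'uuid': '<uuid>', 'name': 'vmi-1',
                                       'id_perms': {'user_visible': True}}},
    ]}
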
-
- def _virtual_machine_interface_create_default_children(self, parent_obj):
- pass
- #end _virtual_machine_interface_create_default_children
-
- def _virtual_machine_interface_delete_default_children(self, parent_dict):
- pass
- #end _virtual_machine_interface_delete_default_children
-
- def loadbalancer_healthmonitor_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_healthmonitor_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'loadbalancer_healthmonitor':
- abort(404, 'No loadbalancer-healthmonitor object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('loadbalancer-healthmonitor')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'loadbalancer_healthmonitor_properties', u'id_perms', u'display_name']
- references = []
- back_references = [u'project_back_refs', u'loadbalancer_pool_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('loadbalancer-healthmonitor', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('loadbalancer-healthmonitor', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_healthmonitor_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'loadbalancer-healthmonitor': rsp_body}
- #end loadbalancer_healthmonitor_http_get
-
- def loadbalancer_healthmonitor_http_put(self, id):
- key = 'loadbalancer-healthmonitor'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_healthmonitor_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'loadbalancer_healthmonitor':
- abort(404, 'No loadbalancer-healthmonitor object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('loadbalancer_healthmonitor_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_loadbalancer_healthmonitor_properties = LoadbalancerHealthmonitorType(**prop_dict)
- xx_loadbalancer_healthmonitor_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_loadbalancer_healthmonitor_properties = LoadbalancerHealthmonitorType()
- try:
- xx_loadbalancer_healthmonitor_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'loadbalancer_healthmonitor', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_put', msg)
- abort(code, msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('loadbalancer-healthmonitor')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('loadbalancer-healthmonitor', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('loadbalancer-healthmonitor', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_healthmonitor_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'loadbalancer-healthmonitor': rsp_body}
- #end loadbalancer_healthmonitor_http_put
-
- def loadbalancer_healthmonitor_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'loadbalancer_healthmonitor':
- abort(404, 'No loadbalancer-healthmonitor object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_healthmonitor_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'project_back_refs', u'loadbalancer_pool_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('loadbalancer-healthmonitor', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'loadbalancer_healthmonitor', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('loadbalancer-healthmonitor', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('loadbalancer-healthmonitor')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- loadbalancer_pool_back_refs = read_result.get('loadbalancer_pool_back_refs', None)
- if loadbalancer_pool_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['loadbalancer_pool_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._loadbalancer_healthmonitor_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('loadbalancer-healthmonitor', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_healthmonitor_delete', id, read_result)
- except Exception as e:
- pass
-
- #end loadbalancer_healthmonitor_http_delete
-
- def loadbalancer_healthmonitors_http_post(self):
- key = 'loadbalancer-healthmonitor'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_healthmonitor_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('loadbalancer_healthmonitor_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_loadbalancer_healthmonitor_properties = LoadbalancerHealthmonitorType(**prop_dict)
- xx_loadbalancer_healthmonitor_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_loadbalancer_healthmonitor_properties = LoadbalancerHealthmonitorType()
- try:
- xx_loadbalancer_healthmonitor_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'loadbalancer-healthmonitor', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'loadbalancer_healthmonitor', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'loadbalancer_healthmonitor', 'http_post', err_msg)
- abort(400, err_msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-        # Allocate and store id-mappings before creating the entry on the pubsub store;
-        # else a subscriber can ask for an id mapping before we have stored it.
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('loadbalancer-healthmonitor', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'loadbalancer_healthmonitor', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['loadbalancer_healthmonitor', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('loadbalancer-healthmonitor')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'loadbalancer_healthmonitor', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('loadbalancer-healthmonitor', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'loadbalancer_healthmonitor', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('loadbalancer-healthmonitor', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_healthmonitor_create', obj_dict)
- except Exception as e:
- pass
-
- return {'loadbalancer-healthmonitor': rsp_body}
- #end loadbalancer_healthmonitors_http_post
-
- def loadbalancer_healthmonitors_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'loadbalancer_healthmonitors', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('loadbalancer-healthmonitor', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'loadbalancer_healthmonitors', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'loadbalancer-healthmonitors': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('loadbalancer-healthmonitor', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('loadbalancer-healthmonitor', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('loadbalancer-healthmonitor', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'loadbalancer_healthmonitor_properties', u'id_perms', u'display_name']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('loadbalancer-healthmonitor', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('loadbalancer-healthmonitor', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'loadbalancer-healthmonitor': obj_dict})
-
- return {'loadbalancer-healthmonitors': obj_dicts}
- #end loadbalancer_healthmonitors_http_get
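
The 'filters' query parameter parsed inline above has the form field==value[,field==value...], and the parsed result is passed straight to dbe_list. An equivalent standalone helper, with illustrative field names:

    def parse_filters(filter_params):
        # Returns None for an empty value; raises ValueError on malformed
        # input, which the handler above maps to a 400 response.
        if not filter_params:
            return None
        names, values = [], []
        for item in filter_params.split(','):
            name, sep, value = item.partition('==')
            if not sep:
                raise ValueError('Invalid filter ' + filter_params)
            names.append(name)
            values.append(value)
        return {'field_names': names, 'field_values': values}

    print(parse_filters('display_name==hm1,monitor_type==PING'))
    # {'field_names': ['display_name', 'monitor_type'], 'field_values': ['hm1', 'PING']}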
-
- def _loadbalancer_healthmonitor_create_default_children(self, parent_obj):
- pass
- #end _loadbalancer_healthmonitor_create_default_children
-
- def _loadbalancer_healthmonitor_delete_default_children(self, parent_dict):
- pass
- #end _loadbalancer_healthmonitor_delete_default_children
-
- def virtual_network_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_network_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'virtual_network':
- abort(404, 'No virtual-network object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'virtual_network', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-network')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'virtual_network', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'virtual_network_properties', u'virtual_network_network_id', u'route_target_list', u'router_external', u'is_shared', u'external_ipam', u'flood_unknown_unicast', u'id_perms', u'display_name']
- references = [u'qos_forwarding_class_refs', u'network_ipam_refs', u'network_policy_refs', u'route_table_refs']
- back_references = [u'project_back_refs', 'virtual_machine_interface_back_refs', u'instance_ip_back_refs', u'physical_router_back_refs', u'logical_router_back_refs']
- children = [u'access_control_lists', u'floating_ip_pools', 'routing_instances']
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('virtual-network', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'virtual_network', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible to users: %s' % id
- self.config_object_error(id, None, 'virtual_network', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('virtual-network', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_network_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'virtual-network': rsp_body}
- #end virtual_network_http_get
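
The GET handler above honours If-None-Match against the id_perms last_modified stamp and answers 304 when the client's copy is current. A client-side sketch of that handshake, assuming the requests library and treating the base URL and route as illustrative (the handler itself builds hrefs via generate_url):

    import requests

    def read_virtual_network(base_url, vn_uuid, cached=None):
        headers = {}
        if cached is not None:
            headers['If-None-Match'] = cached['etag']
        resp = requests.get('%s/virtual-network/%s' % (base_url, vn_uuid),
                            headers=headers)
        if resp.status_code == 304 and cached is not None:
            return cached                 # server says the cached copy is still current
        resp.raise_for_status()
        return {'etag': resp.headers.get('ETag'), 'body': resp.json()}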
-
- def virtual_network_http_put(self, id):
- key = 'virtual-network'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_network_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'virtual_network':
- abort(404, 'No virtual-network object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('virtual_network_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_network_properties = VirtualNetworkType(**prop_dict)
- xx_virtual_network_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_network_properties = VirtualNetworkType()
- try:
- xx_virtual_network_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('route_target_list')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_route_target_list = RouteTargetList(**prop_dict)
- xx_route_target_list.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_route_target_list = RouteTargetList()
- try:
- xx_route_target_list.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- for ref_dict in obj_dict.get('network_ipam_refs') or []:
- buf = cStringIO.StringIO()
- xx_network_ipam = VnSubnetsType(**ref_dict['attr'])
- xx_network_ipam.export(buf)
- node = etree.fromstring(buf.getvalue())
- try:
- xx_network_ipam.build(node)
- except Exception as e:
- abort(400, str(e))
- for ref_dict in obj_dict.get('network_policy_refs') or []:
- buf = cStringIO.StringIO()
- xx_network_policy = VirtualNetworkPolicyType(**ref_dict['attr'])
- xx_network_policy.export(buf)
- node = etree.fromstring(buf.getvalue())
- try:
- xx_network_policy.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'virtual_network', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'virtual_network', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = [u'qos_forwarding_class', u'network_ipam', u'network_policy', u'access_control_list', u'floating_ip_pool', 'routing_instance', u'route_table']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('virtual-network')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'virtual_network', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('virtual-network', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'virtual_network', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('virtual-network', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_network_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'virtual-network': rsp_body}
- #end virtual_network_http_put
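
Each typed attribute in the PUT body above is validated by a round trip through the generated type: construct from the dict, export to XML, re-parse, and rebuild, with any exception turned into a 400. A sketch of that round trip as one helper, assuming only that the generated classes expose export() and build() as shown in the removed code; the imports mirror the module's Python 2 style:

    import cStringIO                        # Python 2, as in the removed module
    import xml.etree.ElementTree as etree   # any fromstring-capable etree works here

    def validate_typed_property(prop_cls, prop_dict):
        # prop_cls is one of the generated types used above, e.g.
        # VirtualNetworkType, RouteTargetList or IdPermsType.
        buf = cStringIO.StringIO()
        candidate = prop_cls(**prop_dict)   # rejects unknown keyword arguments
        candidate.export(buf)               # serialize to the schema's XML form
        node = etree.fromstring(buf.getvalue())
        prop_cls().build(node)              # re-parse; raises if values are malformed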
-
- def virtual_network_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'virtual_network':
- abort(404, 'No virtual-network object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_network_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'project_back_refs', 'virtual_machine_interface_back_refs', u'instance_ip_back_refs', u'physical_router_back_refs', u'logical_router_back_refs']
- children = [u'access_control_lists', u'floating_ip_pools', 'routing_instances']
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('virtual-network', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'virtual_network', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'virtual_network', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'virtual_network', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('virtual-network', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-network')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- floating_ip_pools = read_result.get('floating_ip_pools', None)
- if floating_ip_pools:
- has_infos = read_result['floating_ip_pools']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-floating-ip-pool')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'virtual_network', 'http_delete', err_msg)
- abort(409, err_msg)
-
- virtual_machine_interface_back_refs = read_result.get('virtual_machine_interface_back_refs', None)
- if virtual_machine_interface_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_machine_interface_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_network', 'http_delete', err_msg)
- abort(409, err_msg)
-
- instance_ip_back_refs = read_result.get('instance_ip_back_refs', None)
- if instance_ip_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['instance_ip_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_network', 'http_delete', err_msg)
- abort(409, err_msg)
-
- physical_router_back_refs = read_result.get('physical_router_back_refs', None)
- if physical_router_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['physical_router_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_network', 'http_delete', err_msg)
- abort(409, err_msg)
-
- logical_router_back_refs = read_result.get('logical_router_back_refs', None)
- if logical_router_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['logical_router_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'virtual_network', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._virtual_network_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'virtual_network', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('virtual-network', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'virtual_network', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_network_delete', id, read_result)
- except Exception as e:
- pass
-
- #end virtual_network_http_delete
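
The delete guards above all follow one shape: a child list may contain at most the auto-created 'default-*' entry, and every back-reference list must be empty; anything else yields a 409 listing the offending hrefs. A condensed sketch of those two checks, with illustrative data:

    def blocking_children(read_result, child_field, default_name):
        has_infos = read_result.get(child_field) or []
        if (len(has_infos) > 1 or
                (len(has_infos) == 1 and has_infos[0]['to'][-1] != default_name)):
            return [info['href'] for info in has_infos]
        return []

    def blocking_back_refs(read_result, back_ref_field):
        return [info['href'] for info in read_result.get(back_ref_field) or []]

    vn = {'floating_ip_pools': [{'to': ['default-domain', 'p1', 'vn1',
                                        'default-floating-ip-pool'],
                                 'href': 'http://api/floating-ip-pool/uuid-1'}],
          'instance_ip_back_refs': []}
    print(blocking_children(vn, 'floating_ip_pools', 'default-floating-ip-pool'))  # []
    print(blocking_back_refs(vn, 'instance_ip_back_refs'))                         # []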
-
- def virtual_networks_http_post(self):
- key = 'virtual-network'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_virtual_network_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('virtual_network_properties')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_virtual_network_properties = VirtualNetworkType(**prop_dict)
- xx_virtual_network_properties.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_virtual_network_properties = VirtualNetworkType()
- try:
- xx_virtual_network_properties.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('route_target_list')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_route_target_list = RouteTargetList(**prop_dict)
- xx_route_target_list.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_route_target_list = RouteTargetList()
- try:
- xx_route_target_list.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- for ref_dict in obj_dict.get('network_ipam_refs') or []:
- buf = cStringIO.StringIO()
- xx_network_ipam = VnSubnetsType(**ref_dict['attr'])
- xx_network_ipam.export(buf)
- node = etree.fromstring(buf.getvalue())
- try:
- xx_network_ipam.build(node)
- except Exception as e:
- abort(400, str(e))
- for ref_dict in obj_dict.get('network_policy_refs') or []:
- buf = cStringIO.StringIO()
- xx_network_policy = VirtualNetworkPolicyType(**ref_dict['attr'])
- xx_network_policy.export(buf)
- node = etree.fromstring(buf.getvalue())
- try:
- xx_network_policy.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'virtual-network', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'virtual_network', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_network', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = [u'qos_forwarding_class', u'network_ipam', u'network_policy', u'access_control_list', u'floating_ip_pool', 'routing_instance', u'route_table']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('virtual-network', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'virtual_network', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['virtual_network', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('virtual-network')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_network', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('virtual-network', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'virtual_network', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('virtual-network', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_virtual_network_create', obj_dict)
- except Exception as e:
- pass
-
- return {'virtual-network': rsp_body}
- #end virtual_networks_http_post
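
The create handler above derives the parent in two steps: a parent_type of 'config-root' is dropped (treated as no parent), and otherwise the parent's fq_name is everything but the last component of the object's fq_name. A small sketch with illustrative values:

    def split_parent(obj_dict):
        obj_dict = dict(obj_dict)            # work on a copy for the sketch
        if obj_dict.get('parent_type') == 'config-root':
            del obj_dict['parent_type']
        parent_type = obj_dict.get('parent_type')
        if parent_type is None:
            return None, None
        return parent_type, obj_dict['fq_name'][:-1]

    print(split_parent({'fq_name': ['default-domain', 'p1', 'vn1'],
                        'parent_type': 'project'}))
    # ('project', ['default-domain', 'p1'])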
-
- def virtual_networks_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'virtual_networks', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('virtual-network', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'virtual_networks', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'virtual-networks': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('virtual-network', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('virtual-network', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('virtual-network', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'virtual_network_properties', u'virtual_network_network_id', u'route_target_list', u'router_external', u'is_shared', u'external_ipam', u'flood_unknown_unicast', u'id_perms', u'display_name'] + [u'qos_forwarding_class_refs', u'network_ipam_refs', u'network_policy_refs', u'route_table_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('virtual-network', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('virtual-network', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'virtual-network': obj_dict})
-
- return {'virtual-networks': obj_dicts}
- #end virtual_networks_http_get
-
- def _virtual_network_create_default_children(self, parent_obj):
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('floating-ip-pool')
- if r_class and r_class.generate_default_instance:
- child_obj = FloatingIpPool(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('floating-ip-pool')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('floating-ip-pool', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('floating-ip-pool', obj_ids, child_dict)
- self._floating_ip_pool_create_default_children(child_obj)
-
- pass
- #end _virtual_network_create_default_children
-
- def _virtual_network_delete_default_children(self, parent_dict):
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('access-control-list')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('access_control_lists')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-access-control-list':
- default_child_id = has_info['href'].split('/')[-1]
- self.access_control_list_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('floating-ip-pool')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('floating_ip_pools')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-floating-ip-pool':
- default_child_id = has_info['href'].split('/')[-1]
- self.floating_ip_pool_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('routing-instance')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('routing_instances')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-routing-instance':
- default_child_id = has_info['href'].split('/')[-1]
- self.routing_instance_http_delete(default_child_id)
- break
-
- pass
- #end _virtual_network_delete_default_children
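
Each block in the default-children cleanup above performs the same lookup: find the has-info whose fq_name ends in the well-known 'default-*' name and hand the trailing href segment (the uuid) to the per-type delete handler. A compact, purely illustrative sketch of that lookup:

    def find_default_child_uuid(parent_dict, child_field, default_name):
        for has_info in parent_dict.get(child_field) or []:
            if has_info['to'][-1] == default_name:
                return has_info['href'].split('/')[-1]
        return None

    parent = {'floating_ip_pools': [
        {'to': ['default-domain', 'p1', 'vn1', 'default-floating-ip-pool'],
         'href': 'http://api/floating-ip-pool/uuid-1234'}]}
    print(find_default_child_uuid(parent, 'floating_ip_pools',
                                  'default-floating-ip-pool'))   # uuid-1234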
-
- def project_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_project_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'project':
- abort(404, 'No project object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'project', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('project')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'project', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'quota', u'id_perms', u'display_name']
- references = [u'namespace_refs', u'floating_ip_pool_refs']
- back_references = [u'domain_back_refs', u'floating_ip_back_refs']
- children = [u'security_groups', u'virtual_networks', u'qos_queues', u'qos_forwarding_classs', u'network_ipams', u'network_policys', 'virtual_machine_interfaces', u'service_instances', u'route_tables', u'interface_route_tables', u'logical_routers', u'loadbalancer_pools', u'loadbalancer_healthmonitors', u'virtual_ips']
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('project', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'project', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible to users: %s' % id
- self.config_object_error(id, None, 'project', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('project', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_project_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'project': rsp_body}
- #end project_http_get
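
The field list handed to dbe_read above is assembled the same way in every resource GET: an explicit 'fields' query parameter wins outright, otherwise properties and references are always read, and back-references and children are added unless the matching exclude_* parameter is present. A standalone sketch of that assembly, with a plain dict standing in for request.query:

    def build_obj_fields(query, properties, references, back_references, children):
        if 'fields' in query:
            return query['fields'].split(',')
        obj_fields = properties + references
        if 'exclude_back_refs' not in query:
            obj_fields = obj_fields + back_references
        if 'exclude_children' not in query:
            obj_fields = obj_fields + children
        return obj_fields

    print(build_obj_fields({'exclude_children': ''},
                           ['quota'], ['namespace_refs'],
                           ['domain_back_refs'], ['virtual_networks']))
    # ['quota', 'namespace_refs', 'domain_back_refs']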
-
- def project_http_put(self, id):
- key = 'project'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_project_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'project':
- abort(404, 'No project object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('quota')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_quota = QuotaType(**prop_dict)
- xx_quota.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_quota = QuotaType()
- try:
- xx_quota.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- for ref_dict in obj_dict.get('namespace_refs') or []:
- buf = cStringIO.StringIO()
- xx_namespace = SubnetType(**ref_dict['attr'])
- xx_namespace.export(buf)
- node = etree.fromstring(buf.getvalue())
- try:
- xx_namespace.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'project', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'project', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = [u'namespace', u'security_group', u'virtual_network', u'qos_queue', u'qos_forwarding_class', u'network_ipam', u'network_policy', 'virtual_machine_interface', u'floating_ip_pool', u'service_instance', u'route_table', u'interface_route_table', u'logical_router', u'loadbalancer_pool', u'loadbalancer_healthmonitor', u'virtual_ip']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('project')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'project', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('project', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'project', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('project', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_project_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'project': rsp_body}
- #end project_http_put
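
The reference-permission walk in the PUT and POST handlers above repeats the same loop for every '<type>_refs' list: resolve each ref's fq_name to a uuid and run the link-permission check, aborting on the first refusal. A condensed sketch, with db_conn and permissions standing in for the server objects and NoIdError handling trimmed:

    def check_ref_perms(obj_dict, objtype_list, db_conn, permissions, request):
        for obj_type in objtype_list:
            for ref in obj_dict.get('%s_refs' % obj_type) or []:
                ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                ok, status = permissions.check_perms_link(request, ref_uuid)
                if not ok:
                    return status        # (code, err_msg), handed to abort()
        return None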
-
- def project_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'project':
- abort(404, 'No project object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_project_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'domain_back_refs', u'floating_ip_back_refs']
- children = [u'security_groups', u'virtual_networks', u'qos_queues', u'qos_forwarding_classs', u'network_ipams', u'network_policys', 'virtual_machine_interfaces', u'service_instances', u'route_tables', u'interface_route_tables', u'logical_routers', u'loadbalancer_pools', u'loadbalancer_healthmonitors', u'virtual_ips']
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('project', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'project', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'project', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'project', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('project', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('project')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- security_groups = read_result.get('security_groups', None)
- if security_groups:
- has_infos = read_result['security_groups']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-security-group')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'project', 'http_delete', err_msg)
- abort(409, err_msg)
-
- virtual_networks = read_result.get('virtual_networks', None)
- if virtual_networks:
- has_infos = read_result['virtual_networks']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-virtual-network')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'project', 'http_delete', err_msg)
- abort(409, err_msg)
-
- qos_queues = read_result.get('qos_queues', None)
- if qos_queues:
- has_infos = read_result['qos_queues']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-qos-queue')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'project', 'http_delete', err_msg)
- abort(409, err_msg)
-
- qos_forwarding_classs = read_result.get('qos_forwarding_classs', None)
- if qos_forwarding_classs:
- has_infos = read_result['qos_forwarding_classs']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-qos-forwarding-class')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'project', 'http_delete', err_msg)
- abort(409, err_msg)
-
- network_ipams = read_result.get('network_ipams', None)
- if network_ipams:
- has_infos = read_result['network_ipams']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-network-ipam')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'project', 'http_delete', err_msg)
- abort(409, err_msg)
-
- network_policys = read_result.get('network_policys', None)
- if network_policys:
- has_infos = read_result['network_policys']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-network-policy')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'project', 'http_delete', err_msg)
- abort(409, err_msg)
-
- virtual_machine_interfaces = read_result.get('virtual_machine_interfaces', None)
- if virtual_machine_interfaces:
- has_infos = read_result['virtual_machine_interfaces']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-virtual-machine-interface')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'project', 'http_delete', err_msg)
- abort(409, err_msg)
-
- service_instances = read_result.get('service_instances', None)
- if service_instances:
- has_infos = read_result['service_instances']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-service-instance')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'project', 'http_delete', err_msg)
- abort(409, err_msg)
-
- route_tables = read_result.get('route_tables', None)
- if route_tables:
- has_infos = read_result['route_tables']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-route-table')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'project', 'http_delete', err_msg)
- abort(409, err_msg)
-
- interface_route_tables = read_result.get('interface_route_tables', None)
- if interface_route_tables:
- has_infos = read_result['interface_route_tables']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-interface-route-table')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'project', 'http_delete', err_msg)
- abort(409, err_msg)
-
- logical_routers = read_result.get('logical_routers', None)
- if logical_routers:
- has_infos = read_result['logical_routers']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-logical-router')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'project', 'http_delete', err_msg)
- abort(409, err_msg)
-
- loadbalancer_pools = read_result.get('loadbalancer_pools', None)
- if loadbalancer_pools:
- has_infos = read_result['loadbalancer_pools']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-loadbalancer-pool')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'project', 'http_delete', err_msg)
- abort(409, err_msg)
-
- loadbalancer_healthmonitors = read_result.get('loadbalancer_healthmonitors', None)
- if loadbalancer_healthmonitors:
- has_infos = read_result['loadbalancer_healthmonitors']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-loadbalancer-healthmonitor')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'project', 'http_delete', err_msg)
- abort(409, err_msg)
-
- virtual_ips = read_result.get('virtual_ips', None)
- if virtual_ips:
- has_infos = read_result['virtual_ips']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-virtual-ip')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'project', 'http_delete', err_msg)
- abort(409, err_msg)
-
- floating_ip_back_refs = read_result.get('floating_ip_back_refs', None)
- if floating_ip_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['floating_ip_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'project', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._project_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'project', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('project', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'project', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_project_delete', id, read_result)
- except Exception as e:
- pass
-
- #end project_http_delete
-
- def projects_http_post(self):
- key = 'project'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_project_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('quota')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_quota = QuotaType(**prop_dict)
- xx_quota.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_quota = QuotaType()
- try:
- xx_quota.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- for ref_dict in obj_dict.get('namespace_refs') or []:
- buf = cStringIO.StringIO()
- xx_namespace = SubnetType(**ref_dict['attr'])
- xx_namespace.export(buf)
- node = etree.fromstring(buf.getvalue())
- try:
- xx_namespace.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'project', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'project', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'project', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = [u'namespace', u'security_group', u'virtual_network', u'qos_queue', u'qos_forwarding_class', u'network_ipam', u'network_policy', 'virtual_machine_interface', u'floating_ip_pool', u'service_instance', u'route_table', u'interface_route_table', u'logical_router', u'loadbalancer_pool', u'loadbalancer_healthmonitor', u'virtual_ip']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('project', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'project', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['project', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('project')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'project', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('project', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'project', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('project', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_project_create', obj_dict)
- except Exception as e:
- pass
-
- return {'project': rsp_body}
- #end projects_http_post
-
- def projects_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'projects', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('project', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'projects', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'projects': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('project', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('project', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('project', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'quota', u'id_perms', u'display_name'] + [u'namespace_refs', u'floating_ip_pool_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('project', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('project', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'project': obj_dict})
-
- return {'projects': obj_dicts}
- #end projects_http_get
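
For orientation, the collection GET above can return three shapes: a bare count, a summary list (uuid/href/fq_name), or a detail list of wrapped objects. Illustrative examples with placeholder values:

    count_response = {'projects': {'count': 12}}

    summary_response = {'projects': [
        {'uuid': 'uuid-1',
         'href': 'http://api/project/uuid-1',
         'fq_name': ['default-domain', 'admin']},
    ]}

    detail_response = {'projects': [
        {'project': {'name': 'admin',
                     'uuid': 'uuid-1',
                     'href': 'http://api/project/uuid-1',
                     'fq_name': ['default-domain', 'admin'],
                     'display_name': 'admin',
                     'id_perms': {'user_visible': True}}},
    ]}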
-
- def _project_create_default_children(self, parent_obj):
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('security-group')
- if r_class and r_class.generate_default_instance:
- child_obj = SecurityGroup(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('security-group')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('security-group', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('security-group', obj_ids, child_dict)
- self._security_group_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('virtual-network')
- if r_class and r_class.generate_default_instance:
- child_obj = VirtualNetwork(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('virtual-network')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('virtual-network', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('virtual-network', obj_ids, child_dict)
- self._virtual_network_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('qos-queue')
- if r_class and r_class.generate_default_instance:
- child_obj = QosQueue(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('qos-queue')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('qos-queue', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('qos-queue', obj_ids, child_dict)
- self._qos_queue_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('qos-forwarding-class')
- if r_class and r_class.generate_default_instance:
- child_obj = QosForwardingClass(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('qos-forwarding-class')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('qos-forwarding-class', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('qos-forwarding-class', obj_ids, child_dict)
- self._qos_forwarding_class_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('network-ipam')
- if r_class and r_class.generate_default_instance:
- child_obj = NetworkIpam(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('network-ipam')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('network-ipam', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('network-ipam', obj_ids, child_dict)
- self._network_ipam_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('network-policy')
- if r_class and r_class.generate_default_instance:
- child_obj = NetworkPolicy(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('network-policy')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('network-policy', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('network-policy', obj_ids, child_dict)
- self._network_policy_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('virtual-machine-interface')
- if r_class and r_class.generate_default_instance:
- child_obj = VirtualMachineInterface(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('virtual-machine-interface')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('virtual-machine-interface', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('virtual-machine-interface', obj_ids, child_dict)
- self._virtual_machine_interface_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('service-instance')
- if r_class and r_class.generate_default_instance:
- child_obj = ServiceInstance(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('service-instance')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('service-instance', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('service-instance', obj_ids, child_dict)
- self._service_instance_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('route-table')
- if r_class and r_class.generate_default_instance:
- child_obj = RouteTable(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('route-table')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('route-table', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('route-table', obj_ids, child_dict)
- self._route_table_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('interface-route-table')
- if r_class and r_class.generate_default_instance:
- child_obj = InterfaceRouteTable(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('interface-route-table')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('interface-route-table', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('interface-route-table', obj_ids, child_dict)
- self._interface_route_table_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('logical-router')
- if r_class and r_class.generate_default_instance:
- child_obj = LogicalRouter(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('logical-router')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('logical-router', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('logical-router', obj_ids, child_dict)
- self._logical_router_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('loadbalancer-pool')
- if r_class and r_class.generate_default_instance:
- child_obj = LoadbalancerPool(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('loadbalancer-pool')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('loadbalancer-pool', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('loadbalancer-pool', obj_ids, child_dict)
- self._loadbalancer_pool_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('loadbalancer-healthmonitor')
- if r_class and r_class.generate_default_instance:
- child_obj = LoadbalancerHealthmonitor(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('loadbalancer-healthmonitor')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('loadbalancer-healthmonitor', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('loadbalancer-healthmonitor', obj_ids, child_dict)
- self._loadbalancer_healthmonitor_create_default_children(child_obj)
-
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('virtual-ip')
- if r_class and r_class.generate_default_instance:
- child_obj = VirtualIp(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('virtual-ip')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('virtual-ip', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('virtual-ip', obj_ids, child_dict)
- self._virtual_ip_create_default_children(child_obj)
-
- pass
- #end _project_create_default_children
-
- def _project_delete_default_children(self, parent_dict):
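- # For each child type provisioned with a default instance, locate the project's 'default-<type>' child in parent_dict and delete it via that type's http_delete handler.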
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('security-group')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('security_groups')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-security-group':
- default_child_id = has_info['href'].split('/')[-1]
- self.security_group_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('virtual-network')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('virtual_networks')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-virtual-network':
- default_child_id = has_info['href'].split('/')[-1]
- self.virtual_network_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('qos-queue')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('qos_queues')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-qos-queue':
- default_child_id = has_info['href'].split('/')[-1]
- self.qos_queue_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('qos-forwarding-class')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('qos_forwarding_classs')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-qos-forwarding-class':
- default_child_id = has_info['href'].split('/')[-1]
- self.qos_forwarding_class_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('network-ipam')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('network_ipams')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-network-ipam':
- default_child_id = has_info['href'].split('/')[-1]
- self.network_ipam_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('network-policy')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('network_policys')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-network-policy':
- default_child_id = has_info['href'].split('/')[-1]
- self.network_policy_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('virtual-machine-interface')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('virtual_machine_interfaces')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-virtual-machine-interface':
- default_child_id = has_info['href'].split('/')[-1]
- self.virtual_machine_interface_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('service-instance')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('service_instances')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-service-instance':
- default_child_id = has_info['href'].split('/')[-1]
- self.service_instance_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('route-table')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('route_tables')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-route-table':
- default_child_id = has_info['href'].split('/')[-1]
- self.route_table_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('interface-route-table')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('interface_route_tables')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-interface-route-table':
- default_child_id = has_info['href'].split('/')[-1]
- self.interface_route_table_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('logical-router')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('logical_routers')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-logical-router':
- default_child_id = has_info['href'].split('/')[-1]
- self.logical_router_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('loadbalancer-pool')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('loadbalancer_pools')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-loadbalancer-pool':
- default_child_id = has_info['href'].split('/')[-1]
- self.loadbalancer_pool_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('loadbalancer-healthmonitor')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('loadbalancer_healthmonitors')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-loadbalancer-healthmonitor':
- default_child_id = has_info['href'].split('/')[-1]
- self.loadbalancer_healthmonitor_http_delete(default_child_id)
- break
-
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('virtual-ip')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('virtual_ips')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-virtual-ip':
- default_child_id = has_info['href'].split('/')[-1]
- self.virtual_ip_http_delete(default_child_id)
- break
-
- pass
- #end _project_delete_default_children
-
- def qos_forwarding_class_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_qos_forwarding_class_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
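- # If the client sent If-None-Match, its cached ETag (the object's last_modified stamp,
- # also set on the response below) is compared so an unchanged object is answered with 304 instead of a full read.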
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'qos_forwarding_class':
- abort(404, 'No qos-forwarding-class object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'qos_forwarding_class', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('qos-forwarding-class')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'qos_forwarding_class', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'dscp', u'trusted', u'id_perms', u'display_name']
- references = [u'qos_queue_refs']
- back_references = [u'project_back_refs', u'virtual_network_back_refs', 'virtual_machine_interface_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('qos-forwarding-class', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'qos_forwarding_class', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible to users: %s' % id
- self.config_object_error(id, None, 'qos_forwarding_class', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('qos-forwarding-class', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_qos_forwarding_class_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'qos-forwarding-class': rsp_body}
- #end qos_forwarding_class_http_get
-
- def qos_forwarding_class_http_put(self, id):
- key = 'qos-forwarding-class'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_qos_forwarding_class_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'qos_forwarding_class':
- abort(404, 'No qos-forwarding-class object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'qos_forwarding_class', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'qos_forwarding_class', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = [u'qos_queue']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
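- # cleanup_on_failure collects (callable, args) pairs; if a later step fails they are invoked in order to undo the partial state changes.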
- # type-specific hook
- r_class = self.get_resource_class('qos-forwarding-class')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'qos_forwarding_class', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('qos-forwarding-class', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'qos_forwarding_class', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('qos-forwarding-class', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_qos_forwarding_class_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'qos-forwarding-class': rsp_body}
- #end qos_forwarding_class_http_put
-
- def qos_forwarding_class_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'qos_forwarding_class':
- abort(404, 'No qos-forwarding-class object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_qos_forwarding_class_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'project_back_refs', u'virtual_network_back_refs', 'virtual_machine_interface_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('qos-forwarding-class', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'qos_forwarding_class', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'qos_forwarding_class', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'qos_forwarding_class', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('qos-forwarding-class', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('qos-forwarding-class')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- virtual_network_back_refs = read_result.get('virtual_network_back_refs', None)
- if virtual_network_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_network_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'qos_forwarding_class', 'http_delete', err_msg)
- abort(409, err_msg)
-
- virtual_machine_interface_back_refs = read_result.get('virtual_machine_interface_back_refs', None)
- if virtual_machine_interface_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_machine_interface_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'qos_forwarding_class', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._qos_forwarding_class_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'qos_forwarding_class', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('qos-forwarding-class', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'qos_forwarding_class', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_qos_forwarding_class_delete', id, read_result)
- except Exception as e:
- pass
-
- #end qos_forwarding_class_http_delete
-
- def qos_forwarding_classs_http_post(self):
- key = 'qos-forwarding-class'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_qos_forwarding_class_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'qos-forwarding-class', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'qos_forwarding_class', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'qos_forwarding_class', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = [u'qos_queue']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
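- # 'result' still holds the return value of _post_common above (treated as the client-requested uuid, per the uuid_requested name) and is handed to dbe_alloc.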
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('qos-forwarding-class', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'qos_forwarding_class', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['qos_forwarding_class', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('qos-forwarding-class')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'qos_forwarding_class', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('qos-forwarding-class', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'qos_forwarding_class', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('qos-forwarding-class', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_qos_forwarding_class_create', obj_dict)
- except Exception as e:
- pass
-
- return {'qos-forwarding-class': rsp_body}
- #end qos_forwarding_classs_http_post
-
- def qos_forwarding_classs_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
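- # illustrative request (path and values are examples): GET /qos-forwarding-classs?parent_id=<project-uuid>&detail=true&fields=dscp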
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'qos_forwarding_classs', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
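- # expected format is comma-separated field==value pairs, e.g. filters=display_name==example (illustrative value)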
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('qos-forwarding-class', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'qos_forwarding_classs', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'qos-forwarding-classs': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('qos-forwarding-class', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('qos-forwarding-class', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('qos-forwarding-class', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'dscp', u'trusted', u'id_perms', u'display_name'] + [u'qos_queue_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('qos-forwarding-class', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('qos-forwarding-class', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'qos-forwarding-class': obj_dict})
-
- return {'qos-forwarding-classs': obj_dicts}
- #end qos_forwarding_classs_http_get
-
- def _qos_forwarding_class_create_default_children(self, parent_obj):
- pass
- #end _qos_forwarding_class_create_default_children
-
- def _qos_forwarding_class_delete_default_children(self, parent_dict):
- pass
- #end _qos_forwarding_class_delete_default_children
-
- def database_node_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_database_node_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'database_node':
- abort(404, 'No database-node object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'database_node', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('database-node')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'database_node', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'database_node_ip_address', u'id_perms', u'display_name']
- references = []
- back_references = [u'global_system_config_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('database-node', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'database_node', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible to users: %s' % id
- self.config_object_error(id, None, 'database_node', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('database-node', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_database_node_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'database-node': rsp_body}
- #end database_node_http_get
-
- def database_node_http_put(self, id):
- key = 'database-node'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_database_node_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'database_node':
- abort(404, 'No database-node object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'database_node', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'database_node', 'http_put', msg)
- abort(code, msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('database-node')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'database_node', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('database-node', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'database_node', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('database-node', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_database_node_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'database-node': rsp_body}
- #end database_node_http_put
-
- def database_node_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'database_node':
- abort(404, 'No database-node object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_database_node_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'global_system_config_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('database-node', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'database_node', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'database_node', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'database_node', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('database-node', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('database-node')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
-
- # Delete default children first
- self._database_node_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'database_node', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('database-node', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'database_node', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_database_node_delete', id, read_result)
- except Exception as e:
- pass
-
- #end database_node_http_delete
-
- def database_nodes_http_post(self):
- key = 'database-node'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_database_node_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'database-node', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'database_node', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'database_node', 'http_post', err_msg)
- abort(400, err_msg)
-
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('database-node', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'database_node', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['database_node', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('database-node')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'database_node', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('database-node', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'database_node', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('database-node', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_database_node_create', obj_dict)
- except Exception as e:
- pass
-
- return {'database-node': rsp_body}
- #end database_nodes_http_post
-
- def database_nodes_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
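- # illustrative request (path is an example): GET /database-nodes?count=true returns only a count; otherwise the uuid/fq_name list, or full objects when detail=true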
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'database_nodes', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('database-node', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'database_nodes', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'database-nodes': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('database-node', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('database-node', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('database-node', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'database_node_ip_address', u'id_perms', u'display_name']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('database-node', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('database-node', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'database-node': obj_dict})
-
- return {'database-nodes': obj_dicts}
- #end database_nodes_http_get
-
- def _database_node_create_default_children(self, parent_obj):
- pass
- #end _database_node_create_default_children
-
- def _database_node_delete_default_children(self, parent_dict):
- pass
- #end _database_node_delete_default_children
-
- def routing_instance_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_routing_instance_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'routing_instance':
- abort(404, 'No routing-instance object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'routing_instance', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('routing-instance')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'routing_instance', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'service_chain_information', u'routing_instance_is_default', u'static_route_entries', u'default_ce_protocol', u'id_perms', u'display_name']
- references = ['routing_instance_refs', 'route_target_refs']
- back_references = ['virtual_machine_interface_back_refs', u'virtual_network_back_refs', 'routing_instance_back_refs', 'customer_attachment_back_refs']
- children = ['bgp_routers']
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('routing-instance', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'routing_instance', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible to users: %s' % id
- self.config_object_error(id, None, 'routing_instance', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('routing-instance', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_routing_instance_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'routing-instance': rsp_body}
- #end routing_instance_http_get
-
- def routing_instance_http_put(self, id):
- key = 'routing-instance'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_routing_instance_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'routing_instance':
- abort(404, 'No routing-instance object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('service_chain_information')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_service_chain_information = ServiceChainInfo(**prop_dict)
- xx_service_chain_information.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_service_chain_information = ServiceChainInfo()
- try:
- xx_service_chain_information.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('static_route_entries')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_static_route_entries = StaticRouteEntriesType(**prop_dict)
- xx_static_route_entries.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_static_route_entries = StaticRouteEntriesType()
- try:
- xx_static_route_entries.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('default_ce_protocol')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_default_ce_protocol = DefaultProtocolType(**prop_dict)
- xx_default_ce_protocol.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_default_ce_protocol = DefaultProtocolType()
- try:
- xx_default_ce_protocol.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- for ref_dict in obj_dict.get('routing_instance_refs') or []:
- if fq_name == ref_dict['to']:
- abort(404, 'Cannot add reference to self')
- buf = cStringIO.StringIO()
- xx_routing_instance = ConnectionType(**ref_dict['attr'])
- xx_routing_instance.export(buf)
- node = etree.fromstring(buf.getvalue())
- try:
- xx_routing_instance.build(node)
- except Exception as e:
- abort(400, str(e))
- for ref_dict in obj_dict.get('route_target_refs') or []:
- buf = cStringIO.StringIO()
- xx_route_target = InstanceTargetType(**ref_dict['attr'])
- xx_route_target.export(buf)
- node = etree.fromstring(buf.getvalue())
- try:
- xx_route_target.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'routing_instance', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'routing_instance', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = ['bgp_router', 'routing_instance', 'route_target']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('routing-instance')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'routing_instance', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('routing-instance', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'routing_instance', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('routing-instance', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_routing_instance_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'routing-instance': rsp_body}
- #end routing_instance_http_put
-
- def routing_instance_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'routing_instance':
- abort(404, 'No routing-instance object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_routing_instance_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = ['virtual_machine_interface_back_refs', u'virtual_network_back_refs', 'routing_instance_back_refs', 'customer_attachment_back_refs']
- children = ['bgp_routers']
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('routing-instance', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'routing_instance', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'routing_instance', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'routing_instance', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('routing-instance', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('routing-instance')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- bgp_routers = read_result.get('bgp_routers', None)
- if bgp_routers:
- has_infos = read_result['bgp_routers']
- if ((len(has_infos) > 1) or
- (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-bgp-router')):
- has_urls = [has_info['href'] for has_info in has_infos]
- has_str = ', '.join(has_urls)
- err_msg = 'Children ' + has_str + ' still exist'
- self.config_object_error(id, None, 'routing_instance', 'http_delete', err_msg)
- abort(409, err_msg)
-
- virtual_machine_interface_back_refs = read_result.get('virtual_machine_interface_back_refs', None)
- if virtual_machine_interface_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_machine_interface_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'routing_instance', 'http_delete', err_msg)
- abort(409, err_msg)
-
- routing_instance_back_refs = read_result.get('routing_instance_back_refs', None)
- if routing_instance_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['routing_instance_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'routing_instance', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._routing_instance_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'routing_instance', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('routing-instance', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'routing_instance', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_routing_instance_delete', id, read_result)
- except Exception as e:
- pass
-
- #end routing_instance_http_delete
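Once "state modification starts", the handlers above accumulate (callable, args) pairs in cleanup_on_failure and replay them if a later dbe_update/dbe_delete/dbe_create step fails. A self-contained sketch of that rollback idiom follows; provision(), failing_step, and the step tuples are illustrative names, not the server's API.

def provision(steps):
    """Run (do, undo, args) steps in order; if any step fails, replay the
    undo callables already recorded, mirroring cleanup_on_failure."""
    cleanup_on_failure = []
    try:
        for do, undo, args in steps:
            do(*args)
            if undo is not None:
                cleanup_on_failure.append((undo, args))
    except Exception as e:
        for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
            fail_cleanup_callable(*cleanup_args)
        return (False, str(e))
    return (True, None)


# Hypothetical usage: step two fails, so step one's undo runs.
log = []


def failing_step(name):
    raise RuntimeError('dbe_delete failed for %s' % name)


ok, err = provision([
    (lambda name: log.append('allocated ' + name),
     lambda name: log.append('released ' + name), ['demo-ri']),
    (failing_step, None, ['demo-ri']),
])
assert not ok and log == ['allocated demo-ri', 'released demo-ri']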
-
- def routing_instances_http_post(self):
- key = 'routing-instance'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_routing_instance_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('service_chain_information')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_service_chain_information = ServiceChainInfo(**prop_dict)
- xx_service_chain_information.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_service_chain_information = ServiceChainInfo()
- try:
- xx_service_chain_information.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('static_route_entries')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_static_route_entries = StaticRouteEntriesType(**prop_dict)
- xx_static_route_entries.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_static_route_entries = StaticRouteEntriesType()
- try:
- xx_static_route_entries.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('default_ce_protocol')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_default_ce_protocol = DefaultProtocolType(**prop_dict)
- xx_default_ce_protocol.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_default_ce_protocol = DefaultProtocolType()
- try:
- xx_default_ce_protocol.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- for ref_dict in obj_dict.get('routing_instance_refs') or []:
- if fq_name == ref_dict['to']:
- abort(404, 'Cannot add reference to self')
- buf = cStringIO.StringIO()
- xx_routing_instance = ConnectionType(**ref_dict['attr'])
- xx_routing_instance.export(buf)
- node = etree.fromstring(buf.getvalue())
- try:
- xx_routing_instance.build(node)
- except Exception as e:
- abort(400, str(e))
- for ref_dict in obj_dict.get('route_target_refs') or []:
- buf = cStringIO.StringIO()
- xx_route_target = InstanceTargetType(**ref_dict['attr'])
- xx_route_target.export(buf)
- node = etree.fromstring(buf.getvalue())
- try:
- xx_route_target.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'routing-instance', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'routing_instance', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'routing_instance', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = ['bgp_router', 'routing_instance', 'route_target']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('routing-instance', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'routing_instance', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['routing_instance', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('routing-instance')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'routing_instance', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('routing-instance', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'routing_instance', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('routing-instance', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_routing_instance_create', obj_dict)
- except Exception as e:
- pass
-
- return {'routing-instance': rsp_body}
- #end routing_instances_http_post
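The POST handler above deliberately allocates and stores the id mapping (dbe_alloc) before publishing the object (dbe_create), so a subscriber that learns the uuid can always resolve it, and dbe_release is queued as the compensating action. A toy sketch of that ordering with an in-memory store; the *_demo names and the two dicts are assumptions for illustration only.

import uuid

# Toy in-memory stand-ins for the id-mapping store and the pubsub store.
id_map = {}        # fq_name (as a tuple) -> uuid
pubsub_store = {}  # uuid -> object dict


def dbe_alloc_demo(fq_name):
    """Allocate a uuid and record the fq_name -> uuid mapping first."""
    obj_uuid = str(uuid.uuid4())
    id_map[tuple(fq_name)] = obj_uuid
    return obj_uuid


def dbe_release_demo(fq_name):
    """Compensating action, the analogue of what cleanup_on_failure queues."""
    id_map.pop(tuple(fq_name), None)


def dbe_create_demo(obj_uuid, obj_dict):
    """Only after the mapping exists is the object published."""
    pubsub_store[obj_uuid] = obj_dict


fq_name = ['default-domain', 'default-project', 'demo-ri']
new_uuid = dbe_alloc_demo(fq_name)            # mapping stored before publish
dbe_create_demo(new_uuid, {'fq_name': fq_name})
assert id_map[tuple(fq_name)] == new_uuid and new_uuid in pubsub_store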
-
- def routing_instances_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'routing_instances', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('routing-instance', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'routing_instances', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'routing-instances': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('routing-instance', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('routing-instance', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('routing-instance', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'service_chain_information', u'routing_instance_is_default', u'static_route_entries', u'default_ce_protocol', u'id_perms', u'display_name'] + ['routing_instance_refs', 'route_target_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('routing-instance', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('routing-instance', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'routing-instance': obj_dict})
-
- return {'routing-instances': obj_dicts}
- #end routing_instances_http_get
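The collection GET handlers above all accept the same query parameters: parent_fq_name_str/parent_type or parent_id, back_ref_id, obj_uuids, count, detail, fields, and a filters string of the form name==value,name==value. A small framework-independent sketch of how such a filters string decomposes; parse_filters and the example values are assumptions, not the server's code.

def parse_filters(filter_params):
    """Split 'name==value,name==value' into the dict shape the db layer
    expects; malformed input is what the handlers turn into an HTTP 400."""
    if not filter_params:
        return None
    ff_key_vals = filter_params.split(',')
    ff_names = [ff.split('==')[0] for ff in ff_key_vals]
    ff_values = [ff.split('==')[1] for ff in ff_key_vals]
    return {'field_names': ff_names, 'field_values': ff_values}


assert parse_filters('display_name==ri1,routing_instance_is_default==false') == {
    'field_names': ['display_name', 'routing_instance_is_default'],
    'field_values': ['ri1', 'false'],
}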
-
- def _routing_instance_create_default_children(self, parent_obj):
- # Create a default child only if provisioned for
- r_class = self.get_resource_class('bgp-router')
- if r_class and r_class.generate_default_instance:
- child_obj = BgpRouter(parent_obj = parent_obj)
- child_dict = child_obj.__dict__
- fq_name = child_dict['fq_name']
- child_dict['id_perms'] = self._get_default_id_perms('bgp-router')
-
- db_conn = self._db_conn
- (ok, result) = db_conn.dbe_alloc('bgp-router', child_dict)
- if not ok:
- return (ok, result)
-
- obj_ids = result
- db_conn.dbe_create('bgp-router', obj_ids, child_dict)
- self._bgp_router_create_default_children(child_obj)
-
- pass
- #end _routing_instance_create_default_children
-
- def _routing_instance_delete_default_children(self, parent_dict):
- # Delete a default child only if provisioned for
- r_class = self.get_resource_class('bgp-router')
- if r_class and r_class.generate_default_instance:
- # first locate default child then delete it
- has_infos = parent_dict.get('bgp_routers')
- if has_infos:
- for has_info in has_infos:
- if has_info['to'][-1] == 'default-bgp-router':
- default_child_id = has_info['href'].split('/')[-1]
- self.bgp_router_http_delete(default_child_id)
- break
-
- pass
- #end _routing_instance_delete_default_children
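The default-children helpers above locate the auto-created child by the last element of its fq_name and recover its uuid from the href. A minimal sketch of that lookup; find_default_child_uuid and the sample href are hypothetical.

def find_default_child_uuid(children, default_name='default-bgp-router'):
    """Return the uuid of the default child, taken from the tail of its href,
    or None if no default child is present."""
    for has_info in children or []:
        if has_info['to'][-1] == default_name:
            return has_info['href'].split('/')[-1]
    return None


children = [{'to': ['default-domain', 'ri1', 'default-bgp-router'],
             'href': 'http://api-server:8082/bgp-router/1111-2222'}]
assert find_default_child_uuid(children) == '1111-2222'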
-
- def network_ipam_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_network_ipam_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'network_ipam':
- abort(404, 'No network-ipam object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'network_ipam', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('network-ipam')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'network_ipam', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'network_ipam_mgmt', u'id_perms', u'display_name']
- references = [u'virtual_DNS_refs']
- back_references = [u'project_back_refs', u'virtual_network_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('network-ipam', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'network_ipam', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'network_ipam', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('network-ipam', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_network_ipam_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'network-ipam': rsp_body}
- #end network_ipam_http_get
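The single-object GET handlers honor If-None-Match: the client's ETag (quotes stripped) is compared against the stored id_perms last_modified value, and a match short-circuits the DB read with a 304. A framework-independent sketch, assuming a hypothetical conditional_get helper and timestamp value.

def conditional_get(if_none_match, stored_last_modified):
    """Return (status, body); 304 mirrors the etag short-circuit above."""
    if if_none_match is not None:
        client_tag = if_none_match.replace('"', '')
        if client_tag == stored_last_modified:
            return (304, None)
    # Fall through to the normal read; the real handler also sets the
    # ETag response header to '"' + last_modified + '"'.
    return (200, {'id_perms': {'last_modified': stored_last_modified}})


assert conditional_get('"2017-03-23T06:26:16"', '2017-03-23T06:26:16')[0] == 304
assert conditional_get(None, '2017-03-23T06:26:16')[0] == 200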
-
- def network_ipam_http_put(self, id):
- key = 'network-ipam'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_network_ipam_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'network_ipam':
- abort(404, 'No network-ipam object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('network_ipam_mgmt')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_network_ipam_mgmt = IpamType(**prop_dict)
- xx_network_ipam_mgmt.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_network_ipam_mgmt = IpamType()
- try:
- xx_network_ipam_mgmt.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'network_ipam', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'network_ipam', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = [u'virtual_DNS']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('network-ipam')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'network_ipam', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('network-ipam', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'network_ipam', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('network-ipam', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_network_ipam_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'network-ipam': rsp_body}
- #end network_ipam_http_put
-
- def network_ipam_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'network_ipam':
- abort(404, 'No network-ipam object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_network_ipam_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'project_back_refs', u'virtual_network_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('network-ipam', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'network_ipam', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'network_ipam', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'network_ipam', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('network-ipam', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('network-ipam')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
- virtual_network_back_refs = read_result.get('virtual_network_back_refs', None)
- if virtual_network_back_refs:
- back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_network_back_refs']]
- back_ref_str = ', '.join(back_ref_urls)
- err_msg = 'Back-References from ' + back_ref_str + ' still exist'
- self.config_object_error(id, None, 'network_ipam', 'http_delete', err_msg)
- abort(409, err_msg)
-
-
- # Delete default children first
- self._network_ipam_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'network_ipam', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('network-ipam', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'network_ipam', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_network_ipam_delete', id, read_result)
- except Exception as e:
- pass
-
- #end network_ipam_http_delete
-
- def network_ipams_http_post(self):
- key = 'network-ipam'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_network_ipam_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('network_ipam_mgmt')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_network_ipam_mgmt = IpamType(**prop_dict)
- xx_network_ipam_mgmt.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_network_ipam_mgmt = IpamType()
- try:
- xx_network_ipam_mgmt.build(node)
- except Exception as e:
- abort(400, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'network-ipam', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'network_ipam', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'network_ipam', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = [u'virtual_DNS']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('network-ipam', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'network_ipam', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['network_ipam', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('network-ipam')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'network_ipam', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('network-ipam', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'network_ipam', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('network-ipam', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_network_ipam_create', obj_dict)
- except Exception as e:
- pass
-
- return {'network-ipam': rsp_body}
- #end network_ipams_http_post
-
- def network_ipams_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'network_ipams', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('network-ipam', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'network_ipams', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'network-ipams': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('network-ipam', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('network-ipam', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('network-ipam', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'network_ipam_mgmt', u'id_perms', u'display_name'] + [u'virtual_DNS_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('network-ipam', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('network-ipam', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'network-ipam': obj_dict})
-
- return {'network-ipams': obj_dicts}
- #end network_ipams_http_get
-
- def _network_ipam_create_default_children(self, parent_obj):
- pass
- #end _network_ipam_create_default_children
-
- def _network_ipam_delete_default_children(self, parent_dict):
- pass
- #end _network_ipam_delete_default_children
-
- def logical_router_http_get(self, id):
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_logical_router_read', id)
- except Exception as e:
- pass
-
- # TODO get vals from request out of the global ASAP
- etag = request.headers.get('If-None-Match')
- try:
- obj_type = self._db_conn.uuid_to_obj_type(id)
- except NoIdError:
- obj_type = None
- if obj_type != 'logical_router':
- abort(404, 'No logical-router object found for id %s' %(id))
- # common handling for all resource get
- (ok, result) = self._get_common(request, id)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'logical_router', 'http_get', msg)
- abort(code, msg)
-
- # type-specific hook
- r_class = self.get_resource_class('logical-router')
- if r_class:
- r_class.http_get(id)
-
- db_conn = self._db_conn
- if etag:
- obj_ids = {'uuid': id}
- (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'logical_router', 'http_get', result)
- abort(404, result)
-
- is_latest = result
- if is_latest:
- # send Not-Modified, caches use this for read optimization
- response.status = 304
- return
- #end if etag
-
- obj_ids = {'uuid': id}
-
- # Generate field list for db layer
- properties = [u'id_perms', u'display_name']
- references = ['virtual_machine_interface_refs', 'route_target_refs', u'virtual_network_refs', u'service_instance_refs']
- back_references = [u'project_back_refs']
- children = []
- if 'fields' in request.query:
- obj_fields = request.query.fields.split(',')
- else: # default props + children + refs + backrefs
- obj_fields = properties + references
- if 'exclude_back_refs' not in request.query:
- obj_fields = obj_fields + back_references
- if 'exclude_children' not in request.query:
- obj_fields = obj_fields + children
-
- (ok, result) = db_conn.dbe_read('logical-router', obj_ids, obj_fields)
- if not ok:
- # Not present in DB
- self.config_object_error(id, None, 'logical_router', 'http_get', result)
- abort(404, result)
-
- # check visibility
- if (not result['id_perms'].get('user_visible', True) and
- not self.is_admin_request()):
- result = 'This object is not visible by users: %s' % id
- self.config_object_error(id, None, 'logical_router', 'http_get', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('logical-router', id)
- rsp_body['name'] = result['fq_name'][-1]
- rsp_body.update(result)
- id_perms = result['id_perms']
- response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
- try:
- self._extension_mgrs['resourceApi'].map_method('post_logical_router_read', id, rsp_body)
- except Exception as e:
- pass
-
- return {'logical-router': rsp_body}
- #end logical_router_http_get
-
- def logical_router_http_put(self, id):
- key = 'logical-router'
- obj_dict = request.json[key]
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_logical_router_update', id, obj_dict)
- except Exception as e:
- pass
-
- db_conn = self._db_conn
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'logical_router':
- abort(404, 'No logical-router object found for id %s' %(id))
- fq_name = db_conn.uuid_to_fq_name(id)
- except NoIdError as e:
- abort(404, str(e))
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource put
- (ok, result) = self._put_common(request, 'logical_router', id, fq_name, obj_dict)
- if not ok:
- (code, msg) = result
- self.config_object_error(id, None, 'logical_router', 'http_put', msg)
- abort(code, msg)
-
- # Validate perms
- objtype_list = ['virtual_machine_interface', 'route_target', u'virtual_network', u'service_instance']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- try:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- except NoIdError as e:
- abort(404, str(e))
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # type-specific hook
- r_class = self.get_resource_class('logical-router')
- if r_class:
- (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
- if not ok:
- (code, msg) = put_result
- self.config_object_error(id, None, 'logical_router', 'http_put', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_put_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))
-
- obj_ids = {'uuid': id}
- try:
- (ok, result) = db_conn.dbe_update('logical-router', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'logical_router', 'http_put', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['uuid'] = id
- rsp_body['href'] = self.generate_url('logical-router', id)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_logical_router_update', id, obj_dict)
- except Exception as e:
- pass
-
- return {'logical-router': rsp_body}
- #end logical_router_http_put
-
- def logical_router_http_delete(self, id):
- db_conn = self._db_conn
- # if obj doesn't exist return early
- try:
- obj_type = db_conn.uuid_to_obj_type(id)
- if obj_type != 'logical_router':
- abort(404, 'No logical-router object found for id %s' %(id))
- _ = db_conn.uuid_to_fq_name(id)
- except NoIdError:
- abort(404, 'ID %s does not exist' %(id))
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_logical_router_delete', id)
- except Exception as e:
- pass
-
- # read in obj from db (accepting error) to get details of it
- obj_ids = {'uuid': id}
- back_references = [u'project_back_refs']
- children = []
- obj_fields = children + back_references
- (read_ok, read_result) = db_conn.dbe_read('logical-router', obj_ids, obj_fields)
- if not read_ok:
- if read_result.startswith('Unknown id:'):
- abort(404, 'ID %s does not exist' %(id))
- else:
- self.config_object_error(id, None, 'logical_router', 'http_delete', read_result)
- # proceed down to delete the resource
-
- # common handling for all resource delete
- parent_type = read_result.get('parent_type', None)
- (ok, del_result) = self._delete_common(request, 'logical_router', id, parent_type)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'logical_router', 'http_delete', msg)
- abort(code, msg)
-
- fq_name = read_result['fq_name']
- ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('logical-router', fq_name)
- obj_ids['imid'] = ifmap_id
- if parent_type:
- parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
- obj_ids['parent_imid'] = parent_imid
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
-
- # type-specific hook
- r_class = self.get_resource_class('logical-router')
- if r_class:
- if read_ok:
- # fail if non-default children or backrefs exist
-
- # Delete default children first
- self._logical_router_delete_default_children(read_result)
-
- (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
- if not ok:
- (code, msg) = del_result
- self.config_object_error(id, None, 'logical_router', 'http_delete', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_delete_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [id, read_result, db_conn]))
- #end if read_ok
-
- try:
- (ok, del_result) = db_conn.dbe_delete('logical-router', obj_ids, read_result)
- except Exception as e:
- ok = False
- del_result = str(e)
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- self.config_object_error(id, None, 'logical_router', 'http_delete', del_result)
- abort(409, del_result)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_logical_router_delete', id, read_result)
- except Exception as e:
- pass
-
- #end logical_router_http_delete
-
- def logical_routers_http_post(self):
- key = 'logical-router'
- obj_dict = request.json[key]
- self._post_validate(key, obj_dict=obj_dict)
- fq_name = obj_dict['fq_name']
-
- try:
- self._extension_mgrs['resourceApi'].map_method('pre_logical_router_create', obj_dict)
- except Exception as e:
- pass
-
- prop_dict = obj_dict.get('id_perms')
- if prop_dict:
- buf = cStringIO.StringIO()
- xx_id_perms = IdPermsType(**prop_dict)
- xx_id_perms.export(buf)
- node = etree.fromstring(buf.getvalue())
- xx_id_perms = IdPermsType()
- try:
- xx_id_perms.build(node)
- except Exception as e:
- abort(400, str(e))
- # common handling for all resource create
- (ok, result) = self._post_common(request, 'logical-router', obj_dict)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict.get('fq_name', []))
- self.config_object_error(None, fq_name_str, 'logical_router', 'http_post', msg)
- abort(code, msg)
-
- name = obj_dict['fq_name'][-1]
- fq_name = obj_dict['fq_name']
-
- db_conn = self._db_conn
-
- # if client gave parent_type of config-root, ignore and remove
- if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
- del obj_dict['parent_type']
-
- if 'parent_type' in obj_dict:
- # non config-root child, verify parent exists
- parent_type = obj_dict['parent_type']
- parent_fq_name = obj_dict['fq_name'][:-1]
- try:
- parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
- (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
- self._permissions.set_user_role(request, obj_dict)
- except NoIdError:
- err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
- fq_name_str = ':'.join(parent_fq_name)
- self.config_object_error(None, fq_name_str, 'logical_router', 'http_post', err_msg)
- abort(400, err_msg)
-
- # Validate perms
- objtype_list = ['virtual_machine_interface', 'route_target', u'virtual_network', u'service_instance']
- for obj_type in objtype_list:
- refs = obj_dict.get('%s_refs'%(obj_type), None)
- if refs:
- for ref in refs:
- ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
- (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
- if not ok:
- (code, err_msg) = status
- abort(code, err_msg)
-
- # State modification starts from here. Ensure that cleanup is done for all state changes
- cleanup_on_failure = []
- # Alloc and Store id-mappings before creating entry on pubsub store.
- # Else a subscriber can ask for an id mapping before we have stored it
- uuid_requested = result
- (ok, result) = db_conn.dbe_alloc('logical-router', obj_dict, uuid_requested)
- if not ok:
- (code, msg) = result
- fq_name_str = ':'.join(obj_dict['fq_name'])
- self.config_object_error(None, fq_name_str, 'logical_router', 'http_post', result)
- abort(code, msg)
- cleanup_on_failure.append((db_conn.dbe_release, ['logical_router', fq_name]))
-
- obj_ids = result
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
-
-
- # type-specific hook
- r_class = self.get_resource_class('logical-router')
- if r_class:
- try:
- (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
- except Exception as e:
- ok = False
- result = (500, str(e))
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- (code, msg) = result
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'logical_router', 'http_post', msg)
- abort(code, msg)
- callable = getattr(r_class, 'http_post_collection_fail', None)
- if callable:
- cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
-
- try:
- (ok, result) = \
- db_conn.dbe_create('logical-router', obj_ids, obj_dict)
- except Exception as e:
- ok = False
- result = str(e)
-
- if not ok:
- for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
- fail_cleanup_callable(*cleanup_args)
- fq_name_str = ':'.join(fq_name)
- self.config_object_error(None, fq_name_str, 'logical_router', 'http_post', result)
- abort(404, result)
-
- rsp_body = {}
- rsp_body['name'] = name
- rsp_body['fq_name'] = fq_name
- rsp_body['uuid'] = obj_ids['uuid']
- rsp_body['href'] = self.generate_url('logical-router', obj_ids['uuid'])
- if 'parent_type' in obj_dict:
- # non config-root child, send back parent uuid/href
- rsp_body['parent_uuid'] = parent_uuid
- rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)
-
- try:
- self._extension_mgrs['resourceApi'].map_method('post_logical_router_create', obj_dict)
- except Exception as e:
- pass
-
- return {'logical-router': rsp_body}
- #end logical_routers_http_post
-
- def logical_routers_http_get(self):
- # gather list of uuids using 1. any specified anchors
- # 2. any specified filters
- # if not 'detail' return list with any specified 'fields'
- # if 'detail' return list with props+refs + any specified 'fields'
-
- env = request.headers.environ
- tenant_name = env.get(hdr_server_tenant(), 'default-project')
- parent_uuids = None
- back_ref_uuids = None
- obj_uuids = None
- if (('parent_fq_name_str' in request.query) and
- ('parent_type' in request.query)):
- parent_fq_name = request.query.parent_fq_name_str.split(':')
- parent_type = request.query.parent_type
- parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
- elif 'parent_id' in request.query:
- parent_ids = request.query.parent_id.split(',')
- parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
- if 'back_ref_id' in request.query:
- back_ref_ids = request.query.back_ref_id.split(',')
- back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
- if 'obj_uuids' in request.query:
- obj_uuids = request.query.obj_uuids.split(',')
-
- # common handling for all resource get
- (ok, result) = self._get_common(request, parent_uuids)
- if not ok:
- (code, msg) = result
- self.config_object_error(None, None, 'logical_routers', 'http_get_collection', msg)
- abort(code, msg)
-
- if 'count' in request.query:
- count = 'true' in request.query.count.lower()
- else:
- count = False
-
- filter_params = request.query.filters
- if filter_params:
- try:
- ff_key_vals = filter_params.split(',')
- ff_names = [ff.split('==')[0] for ff in ff_key_vals]
- ff_values = [ff.split('==')[1] for ff in ff_key_vals]
- filters = {'field_names': ff_names, 'field_values': ff_values}
- except Exception as e:
- abort(400, 'Invalid filter ' + filter_params)
- else:
- filters = None
- db_conn = self._db_conn
- (ok, result) = \
- db_conn.dbe_list('logical-router', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
- if not ok:
- self.config_object_error(None, None, 'logical_routers', 'http_get_collection', result)
- abort(404, result)
-
- # If only counting, return early
- if count:
- return {'logical-routers': {'count': result}}
-
- if 'detail' in request.query:
- detail = 'true' in request.query.detail.lower()
- else:
- detail = False
-
- fq_names_uuids = result
- obj_dicts = []
- if not detail:
- if not self.is_admin_request():
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms']
- (ok, result) = db_conn.dbe_read_multi('logical-router', obj_ids_list, obj_fields)
- if not ok:
- result = []
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- if obj_result['id_perms'].get('user_visible', True):
- obj_dict = {}
- obj_dict['uuid'] = obj_result['uuid']
- obj_dict['href'] = self.generate_url('logical-router', obj_result['uuid'])
- obj_dict['fq_name'] = obj_result['fq_name']
- obj_dicts.append(obj_dict)
- else:
- for fq_name, obj_uuid in fq_names_uuids:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['uuid'] = obj_uuid
- obj_dict['href'] = self.generate_url('logical-router', obj_uuid)
- obj_dict['fq_name'] = fq_name
- obj_dicts.append(obj_dict)
- else: #detail
- obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
- obj_fields = [u'id_perms', u'display_name'] + ['virtual_machine_interface_refs', 'route_target_refs', u'virtual_network_refs', u'service_instance_refs']
- if 'fields' in request.query:
- obj_fields.extend(request.query.fields.split(','))
- (ok, result) = db_conn.dbe_read_multi('logical-router', obj_ids_list, obj_fields)
-
- if not ok:
- result = []
-
- for obj_result in result:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_dict = {}
- obj_dict['name'] = obj_result['fq_name'][-1]
- obj_dict['href'] = self.generate_url('logical-router', obj_result['uuid'])
- obj_dict.update(obj_result)
- if (obj_dict['id_perms'].get('user_visible', True) or
- self.is_admin_request()):
- obj_dicts.append({'logical-router': obj_dict})
-
- return {'logical-routers': obj_dicts}
- #end logical_routers_http_get
-
- def _logical_router_create_default_children(self, parent_obj):
- pass
- #end _logical_router_create_default_children
-
- def _logical_router_delete_default_children(self, parent_dict):
- pass
- #end _logical_router_delete_default_children
-
-#end class VncApiServerGen
-
-class DefaultsGen(object):
- def __init__(self):
- self.perms = {}
- default_perms = self._common_default_perms
- id_perms = IdPermsType(default_perms, None, True, 0, 0)
- self.perms['domain'] = id_perms
- self.perms['global-vrouter-config'] = id_perms
- self.perms['instance-ip'] = id_perms
- self.perms['network-policy'] = id_perms
- self.perms['loadbalancer-pool'] = id_perms
- self.perms['virtual-DNS-record'] = id_perms
- self.perms['route-target'] = id_perms
- self.perms['floating-ip'] = id_perms
- self.perms['floating-ip-pool'] = id_perms
- self.perms['physical-router'] = id_perms
- self.perms['bgp-router'] = id_perms
- self.perms['virtual-router'] = id_perms
- self.perms['config-root'] = id_perms
- self.perms['subnet'] = id_perms
- self.perms['global-system-config'] = id_perms
- self.perms['service-appliance'] = id_perms
- self.perms['service-instance'] = id_perms
- self.perms['namespace'] = id_perms
- self.perms['logical-interface'] = id_perms
- self.perms['route-table'] = id_perms
- self.perms['physical-interface'] = id_perms
- self.perms['access-control-list'] = id_perms
- self.perms['analytics-node'] = id_perms
- self.perms['virtual-DNS'] = id_perms
- self.perms['customer-attachment'] = id_perms
- self.perms['service-appliance-set'] = id_perms
- self.perms['config-node'] = id_perms
- self.perms['qos-queue'] = id_perms
- self.perms['virtual-machine'] = id_perms
- self.perms['interface-route-table'] = id_perms
- self.perms['service-template'] = id_perms
- self.perms['virtual-ip'] = id_perms
- self.perms['loadbalancer-member'] = id_perms
- self.perms['security-group'] = id_perms
- self.perms['provider-attachment'] = id_perms
- self.perms['virtual-machine-interface'] = id_perms
- self.perms['loadbalancer-healthmonitor'] = id_perms
- self.perms['virtual-network'] = id_perms
- self.perms['project'] = id_perms
- self.perms['qos-forwarding-class'] = id_perms
- self.perms['database-node'] = id_perms
- self.perms['routing-instance'] = id_perms
- self.perms['network-ipam'] = id_perms
- self.perms['logical-router'] = id_perms
-
- self.resource = {}
-
- #end __init__
-#end class DefaultsGen
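DefaultsGen above builds one IdPermsType from the common default permissions and assigns that same object to every resource type name. A compact sketch of that shared-default mapping; DEFAULT_ID_PERMS is a plain-dict placeholder for the generated IdPermsType, and the type list is abbreviated.

# Placeholder for the generated IdPermsType default (the real constructor
# takes the common default permissions plus enable/created/last_modified).
DEFAULT_ID_PERMS = {'enable': True, 'created': 0, 'last_modified': 0}

RESOURCE_TYPES = [
    'domain', 'project', 'virtual-network', 'network-ipam',
    'routing-instance', 'logical-router',
]

# Every resource type shares the same default id_perms object, so it must be
# treated as read-only; mutating it would change the default for all types.
perms = dict((res_type, DEFAULT_ID_PERMS) for res_type in RESOURCE_TYPES)

assert perms['routing-instance'] is perms['network-ipam']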
diff --git a/Testcases/vnc_api/gen/vnc_api_server_gen.pyc b/Testcases/vnc_api/gen/vnc_api_server_gen.pyc
deleted file mode 100644
index 6df141d..0000000
--- a/Testcases/vnc_api/gen/vnc_api_server_gen.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/gen/vnc_api_test_gen.py b/Testcases/vnc_api/gen/vnc_api_test_gen.py
deleted file mode 100644
index a05b722..0000000
--- a/Testcases/vnc_api/gen/vnc_api_test_gen.py
+++ /dev/null
@@ -1,182 +0,0 @@
-
-# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit!
-
-import fixtures
-import testtools
-
-from resource_test import *
-
-class VncApiTestGen(testtools.TestCase, fixtures.TestWithFixtures):
- def test_domain_crud(self):
- self.useFixture(DomainTestFixtureGen(self._vnc_lib))
- #end test_domain_crud
-
- def test_global_vrouter_config_crud(self):
- self.useFixture(GlobalVrouterConfigTestFixtureGen(self._vnc_lib))
- #end test_global_vrouter_config_crud
-
- def test_instance_ip_crud(self):
- self.useFixture(InstanceIpTestFixtureGen(self._vnc_lib))
- #end test_instance_ip_crud
-
- def test_network_policy_crud(self):
- self.useFixture(NetworkPolicyTestFixtureGen(self._vnc_lib))
- #end test_network_policy_crud
-
- def test_loadbalancer_pool_crud(self):
- self.useFixture(LoadbalancerPoolTestFixtureGen(self._vnc_lib))
- #end test_loadbalancer_pool_crud
-
- def test_virtual_DNS_record_crud(self):
- self.useFixture(VirtualDnsRecordTestFixtureGen(self._vnc_lib))
- #end test_virtual_DNS_record_crud
-
- def test_route_target_crud(self):
- self.useFixture(RouteTargetTestFixtureGen(self._vnc_lib))
- #end test_route_target_crud
-
- def test_floating_ip_crud(self):
- self.useFixture(FloatingIpTestFixtureGen(self._vnc_lib))
- #end test_floating_ip_crud
-
- def test_floating_ip_pool_crud(self):
- self.useFixture(FloatingIpPoolTestFixtureGen(self._vnc_lib))
- #end test_floating_ip_pool_crud
-
- def test_physical_router_crud(self):
- self.useFixture(PhysicalRouterTestFixtureGen(self._vnc_lib))
- #end test_physical_router_crud
-
- def test_bgp_router_crud(self):
- self.useFixture(BgpRouterTestFixtureGen(self._vnc_lib))
- #end test_bgp_router_crud
-
- def test_virtual_router_crud(self):
- self.useFixture(VirtualRouterTestFixtureGen(self._vnc_lib))
- #end test_virtual_router_crud
-
- def test_subnet_crud(self):
- self.useFixture(SubnetTestFixtureGen(self._vnc_lib))
- #end test_subnet_crud
-
- def test_global_system_config_crud(self):
- self.useFixture(GlobalSystemConfigTestFixtureGen(self._vnc_lib))
- #end test_global_system_config_crud
-
- def test_service_appliance_crud(self):
- self.useFixture(ServiceApplianceTestFixtureGen(self._vnc_lib))
- #end test_service_appliance_crud
-
- def test_service_instance_crud(self):
- self.useFixture(ServiceInstanceTestFixtureGen(self._vnc_lib))
- #end test_service_instance_crud
-
- def test_namespace_crud(self):
- self.useFixture(NamespaceTestFixtureGen(self._vnc_lib))
- #end test_namespace_crud
-
- def test_logical_interface_crud(self):
- self.useFixture(LogicalInterfaceTestFixtureGen(self._vnc_lib))
- #end test_logical_interface_crud
-
- def test_route_table_crud(self):
- self.useFixture(RouteTableTestFixtureGen(self._vnc_lib))
- #end test_route_table_crud
-
- def test_physical_interface_crud(self):
- self.useFixture(PhysicalInterfaceTestFixtureGen(self._vnc_lib))
- #end test_physical_interface_crud
-
- def test_access_control_list_crud(self):
- self.useFixture(AccessControlListTestFixtureGen(self._vnc_lib))
- #end test_access_control_list_crud
-
- def test_analytics_node_crud(self):
- self.useFixture(AnalyticsNodeTestFixtureGen(self._vnc_lib))
- #end test_analytics_node_crud
-
- def test_virtual_DNS_crud(self):
- self.useFixture(VirtualDnsTestFixtureGen(self._vnc_lib))
- #end test_virtual_DNS_crud
-
- def test_customer_attachment_crud(self):
- self.useFixture(CustomerAttachmentTestFixtureGen(self._vnc_lib))
- #end test_customer_attachment_crud
-
- def test_service_appliance_set_crud(self):
- self.useFixture(ServiceApplianceSetTestFixtureGen(self._vnc_lib))
- #end test_service_appliance_set_crud
-
- def test_config_node_crud(self):
- self.useFixture(ConfigNodeTestFixtureGen(self._vnc_lib))
- #end test_config_node_crud
-
- def test_qos_queue_crud(self):
- self.useFixture(QosQueueTestFixtureGen(self._vnc_lib))
- #end test_qos_queue_crud
-
- def test_virtual_machine_crud(self):
- self.useFixture(VirtualMachineTestFixtureGen(self._vnc_lib))
- #end test_virtual_machine_crud
-
- def test_interface_route_table_crud(self):
- self.useFixture(InterfaceRouteTableTestFixtureGen(self._vnc_lib))
- #end test_interface_route_table_crud
-
- def test_service_template_crud(self):
- self.useFixture(ServiceTemplateTestFixtureGen(self._vnc_lib))
- #end test_service_template_crud
-
- def test_virtual_ip_crud(self):
- self.useFixture(VirtualIpTestFixtureGen(self._vnc_lib))
- #end test_virtual_ip_crud
-
- def test_loadbalancer_member_crud(self):
- self.useFixture(LoadbalancerMemberTestFixtureGen(self._vnc_lib))
- #end test_loadbalancer_member_crud
-
- def test_security_group_crud(self):
- self.useFixture(SecurityGroupTestFixtureGen(self._vnc_lib))
- #end test_security_group_crud
-
- def test_provider_attachment_crud(self):
- self.useFixture(ProviderAttachmentTestFixtureGen(self._vnc_lib))
- #end test_provider_attachment_crud
-
- def test_virtual_machine_interface_crud(self):
- self.useFixture(VirtualMachineInterfaceTestFixtureGen(self._vnc_lib))
- #end test_virtual_machine_interface_crud
-
- def test_loadbalancer_healthmonitor_crud(self):
- self.useFixture(LoadbalancerHealthmonitorTestFixtureGen(self._vnc_lib))
- #end test_loadbalancer_healthmonitor_crud
-
- def test_virtual_network_crud(self):
- self.useFixture(VirtualNetworkTestFixtureGen(self._vnc_lib))
- #end test_virtual_network_crud
-
- def test_project_crud(self):
- self.useFixture(ProjectTestFixtureGen(self._vnc_lib))
- #end test_project_crud
-
- def test_qos_forwarding_class_crud(self):
- self.useFixture(QosForwardingClassTestFixtureGen(self._vnc_lib))
- #end test_qos_forwarding_class_crud
-
- def test_database_node_crud(self):
- self.useFixture(DatabaseNodeTestFixtureGen(self._vnc_lib))
- #end test_database_node_crud
-
- def test_routing_instance_crud(self):
- self.useFixture(RoutingInstanceTestFixtureGen(self._vnc_lib))
- #end test_routing_instance_crud
-
- def test_network_ipam_crud(self):
- self.useFixture(NetworkIpamTestFixtureGen(self._vnc_lib))
- #end test_network_ipam_crud
-
- def test_logical_router_crud(self):
- self.useFixture(LogicalRouterTestFixtureGen(self._vnc_lib))
- #end test_logical_router_crud
-
-#end class VncApiTestGen
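
The removed test module exercises one create/teardown cycle per resource type through testtools and the fixtures library. Below is a minimal sketch of that pattern using a hypothetical in-memory FakeApi and DomainLikeFixtureSketch; the real tests drive the generated *TestFixtureGen classes against self._vnc_lib.

import fixtures
import testtools

class FakeApi(object):
    """Hypothetical stand-in for the VNC API client the real fixtures drive."""
    def __init__(self):
        self.store = {}
    def create(self, name):
        self.store[name] = {'fq_name': [name]}
        return name
    def delete(self, name):
        del self.store[name]

class DomainLikeFixtureSketch(fixtures.Fixture):
    """Create an object on setUp and register its deletion as cleanup."""
    def __init__(self, api, name):
        super(DomainLikeFixtureSketch, self).__init__()
        self._api = api
        self._name = name
    def _setUp(self):
        self._api.create(self._name)
        self.addCleanup(self._api.delete, self._name)

class CrudSketchTest(testtools.TestCase):
    def test_domain_like_crud(self):
        api = FakeApi()
        self.useFixture(DomainLikeFixtureSketch(api, 'default-domain'))
        self.assertIn('default-domain', api.store)
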
diff --git a/Testcases/vnc_api/gen/vnc_api_test_gen.pyc b/Testcases/vnc_api/gen/vnc_api_test_gen.pyc
deleted file mode 100644
index abb1874..0000000
--- a/Testcases/vnc_api/gen/vnc_api_test_gen.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/gen/vnc_cassandra_client_gen.py b/Testcases/vnc_api/gen/vnc_cassandra_client_gen.py
deleted file mode 100644
index 3fcc097..0000000
--- a/Testcases/vnc_api/gen/vnc_cassandra_client_gen.py
+++ /dev/null
@@ -1,20248 +0,0 @@
-
-# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit!
-
-import re
-import gevent
-import json
-import pycassa
-import datetime
-from operator import itemgetter
-
-from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
-import cfgm_common.exceptions
-from cfgm_common import utils
-from resource_xsd import *
-from resource_common import *
-from resource_server import *
-
-class VncCassandraClientGen(object):
- def __init__(self):
- self._re_match_parent = re.compile('parent:')
- self._re_match_prop = re.compile('prop:')
- self._re_match_ref = re.compile('ref:')
- self._re_match_backref = re.compile('backref:')
- self._re_match_children = re.compile('children:')
- #end __init__
-
- def _cassandra_domain_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_domain_alloc
-
- def _cassandra_domain_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('domain')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'domain', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('domain_limits', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'domain_limits', field)
-
- field = obj_dict.get('api_access_list', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'api_access_list', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('domain', fq_name_cols)
-
- return (True, '')
- #end _cassandra_domain_create
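
Each *_create method above flattens an object into one wide row keyed by uuid, with JSON-serialized columns named by prefix ('prop:<name>', 'parent:<type>:<uuid>', 'ref:<type>:<uuid>'), plus a reverse-lookup column in the per-type fq_name table. A minimal sketch of that layout, assuming plain dicts in place of the pycassa column families and batches (sketch_create and the sample values are illustrative only):

import json

def sketch_create(obj_uuid_cf, obj_fq_name_cf, obj_type, uuid, obj_dict):
    # one wide row per object, keyed by uuid
    cols = {
        'fq_name': json.dumps(obj_dict['fq_name']),
        'type': json.dumps(obj_type),
    }
    # every property becomes its own 'prop:<name>' column
    for prop, value in obj_dict.get('props', {}).items():
        cols['prop:%s' % prop] = json.dumps(value)
    obj_uuid_cf[uuid] = cols
    # reverse lookup: fq_name table row keyed by type, column name = fq_name + uuid
    fq_name_str = ':'.join(obj_dict['fq_name'])
    obj_fq_name_cf.setdefault(obj_type, {})[fq_name_str + ':' + uuid] = json.dumps(None)

uuid_cf, fq_cf = {}, {}
sketch_create(uuid_cf, fq_cf, 'domain', 'a1b2',
              {'fq_name': ['default-domain'],
               'props': {'display_name': 'default-domain'}})
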
-
- def _cassandra_domain_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # ignoring columns starting from 'b' and 'c' - significant performance
- # impact in scaled setting. e.g. read of project
- if (field_names is None or
- (set(field_names) & (Domain.backref_fields | Domain.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
- if 'projects' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['projects'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['projects'] = sorted_children
- [child.pop('tstamp') for child in result['projects']]
-
- if 'namespaces' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['namespaces'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['namespaces'] = sorted_children
- [child.pop('tstamp') for child in result['namespaces']]
-
- if 'service_templates' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['service_templates'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['service_templates'] = sorted_children
- [child.pop('tstamp') for child in result['service_templates']]
-
- if 'virtual_DNSs' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['virtual_DNSs'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['virtual_DNSs'] = sorted_children
- [child.pop('tstamp') for child in result['virtual_DNSs']]
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_domain_read
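
The matching *_read methods rebuild an object by dispatching on those column-name prefixes: 'prop:' columns become fields, 'parent:' fills in parentage, and 'ref:'/'backref:'/'children:' become links. A minimal sketch over a plain dict row (sketch_read and the sample row are illustrative, not part of the generated API):

import json

def sketch_read(row):
    result = {'fq_name': json.loads(row['fq_name'])}
    for col_name, col_val in row.items():
        if col_name.startswith('prop:'):
            result[col_name.split(':', 1)[1]] = json.loads(col_val)
        elif col_name.startswith('parent:'):
            _, parent_type, parent_uuid = col_name.split(':')
            result['parent_uuid'] = parent_uuid
        elif col_name.startswith('ref:'):
            _, ref_type, ref_uuid = col_name.split(':')
            result.setdefault('%s_refs' % ref_type, []).append({'uuid': ref_uuid})
    return result

row = {'fq_name': json.dumps(['default-domain']),
       'prop:display_name': json.dumps('default-domain'),
       'parent:config_root:r0': json.dumps(None)}
print(sketch_read(row))
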
-
- def _cassandra_domain_count_children(self, obj_uuid, child_type):
- # if child_type = None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in Domain.children_fields:
- return (False, '%s is not a valid children of Domain' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_domain_count_children
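
The count_children helpers bound their column scan with column_start 'children:<type>:' and column_finish 'children:<type>;'; because ';' is the code point immediately after ':', that half-open range captures exactly the columns carrying the prefix (the generated code also trims the trailing 's' from the plural field name). A minimal sketch of the same trick over a sorted list of column names:

def count_prefixed(sorted_col_names, child_type):
    # ';' sorts right after ':', so this half-open range covers exactly the prefix
    col_start = 'children:' + child_type + ':'
    col_finish = 'children:' + child_type + ';'
    return sum(1 for c in sorted_col_names if col_start <= c < col_finish)

cols = sorted(['children:project:u1', 'children:project:u2',
               'children:namespace:u3', 'prop:display_name'])
print(count_prefixed(cols, 'project'))   # -> 2
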
-
- def _cassandra_domain_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'domain_limits' in new_obj_dict:
- new_props['domain_limits'] = new_obj_dict['domain_limits']
- if 'api_access_list' in new_obj_dict:
- new_props['api_access_list'] = new_obj_dict['api_access_list']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from request dict(or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'domain', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'domain', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_domain_update
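
The *_update methods walk the object's existing columns once, rewrite any 'prop:' column named in the request, force a fresh last_modified into id_perms, and push everything as a single batch. A minimal sketch of that merge, with dicts standing in for the pycassa batch (sketch_update is illustrative only):

import datetime
import json

def sketch_update(row, new_props):
    batch = {}
    for col_name, col_val in row.items():
        if not col_name.startswith('prop:'):
            continue
        prop_name = col_name.split(':', 1)[1]
        if prop_name == 'id_perms':
            # the real code delegates to update_last_modified(); same effect here
            id_perms = new_props.get('id_perms', json.loads(col_val))
            id_perms['last_modified'] = datetime.datetime.utcnow().isoformat()
            batch[col_name] = json.dumps(id_perms)
        elif prop_name in new_props:
            batch[col_name] = json.dumps(new_props[prop_name])
    row.update(batch)   # stands in for bch.send()
    return row

row = {'prop:id_perms': json.dumps({'enable': True}),
       'prop:display_name': json.dumps('old-name')}
sketch_update(row, {'display_name': 'new-name'})
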
-
- def _cassandra_domain_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:domain:'
- col_fin = 'children:domain;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:domain:'
- col_fin = 'backref:domain;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('domain', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_domain_list
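
Every *_list variant above funnels its candidates through the same filter_rows helper: a candidate survives only if each requested (field, value) pair matches its serialized 'prop:<field>' column. A minimal sketch over plain dict rows (the real helper multigets just the filter columns from Cassandra):

import json

def sketch_filter_rows(rows, filter_fields):
    kept = {}
    for uuid, cols in rows.items():
        # every requested (field, value) pair must match the serialized prop column
        if all(cols.get('prop:%s' % fname) == fval for fname, fval in filter_fields):
            kept[uuid] = cols
    return kept

rows = {'u1': {'prop:display_name': json.dumps('blue')},
        'u2': {'prop:display_name': json.dumps('red')}}
print(list(sketch_filter_rows(rows, [('display_name', json.dumps('blue'))])))  # ['u1']
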
-
- def _cassandra_domain_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'domain', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'domain', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('domain', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_domain_delete
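
Deletion mirrors creation: the *_delete methods read back fq_name, walk the 'parent:' and 'ref:' columns to unlink peers, drop the uuid row, and finally remove the object's column from the per-type fq_name table. A minimal sketch, again with dicts standing in for column families (sketch_delete is illustrative only):

import json

def sketch_delete(obj_uuid_cf, obj_fq_name_cf, obj_type, uuid):
    row = obj_uuid_cf[uuid]
    fq_name = json.loads(row['fq_name'])
    # in the real code each of these triggers _delete_child/_delete_ref on the peer row
    links = [c for c in row if c.startswith('parent:') or c.startswith('ref:')]
    del obj_uuid_cf[uuid]
    fq_name_col = ':'.join(fq_name) + ':' + uuid
    obj_fq_name_cf[obj_type].pop(fq_name_col, None)
    return links

uuid_cf = {'a1b2': {'fq_name': json.dumps(['default-domain']),
                    'parent:config_root:r0': json.dumps(None)}}
fq_cf = {'domain': {'default-domain:a1b2': json.dumps(None)}}
sketch_delete(uuid_cf, fq_cf, 'domain', 'a1b2')
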
-
- def _cassandra_global_vrouter_config_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_global_vrouter_config_alloc
-
- def _cassandra_global_vrouter_config_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('global_vrouter_config')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'global_vrouter_config', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('linklocal_services', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'linklocal_services', field)
-
- field = obj_dict.get('encapsulation_priorities', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'encapsulation_priorities', field)
-
- field = obj_dict.get('vxlan_network_identifier_mode', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'vxlan_network_identifier_mode', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('global_vrouter_config', fq_name_cols)
-
- return (True, '')
- #end _cassandra_global_vrouter_config_create
-
- def _cassandra_global_vrouter_config_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # ignoring columns starting from 'b' and 'c' - significant performance
- # impact in scaled setting. e.g. read of project
- if (field_names is None or
- (set(field_names) & (GlobalVrouterConfig.backref_fields | GlobalVrouterConfig.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_global_vrouter_config_read
-
- def _cassandra_global_vrouter_config_count_children(self, obj_uuid, child_type):
- # if child_type = None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in GlobalVrouterConfig.children_fields:
- return (False, '%s is not a valid children of GlobalVrouterConfig' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_global_vrouter_config_count_children
-
- def _cassandra_global_vrouter_config_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'linklocal_services' in new_obj_dict:
- new_props['linklocal_services'] = new_obj_dict['linklocal_services']
- if 'encapsulation_priorities' in new_obj_dict:
- new_props['encapsulation_priorities'] = new_obj_dict['encapsulation_priorities']
- if 'vxlan_network_identifier_mode' in new_obj_dict:
- new_props['vxlan_network_identifier_mode'] = new_obj_dict['vxlan_network_identifier_mode']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from request dict(or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'global_vrouter_config', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'global_vrouter_config', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_global_vrouter_config_update
-
- def _cassandra_global_vrouter_config_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:global_vrouter_config:'
- col_fin = 'children:global_vrouter_config;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:global_vrouter_config:'
- col_fin = 'backref:global_vrouter_config;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('global_vrouter_config', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_global_vrouter_config_list
-
- def _cassandra_global_vrouter_config_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'global_vrouter_config', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'global_vrouter_config', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('global_vrouter_config', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_global_vrouter_config_delete
-
- def _cassandra_instance_ip_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_instance_ip_alloc
-
- def _cassandra_instance_ip_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('instance_ip')
-
- # Properties
- field = obj_dict.get('instance_ip_address', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'instance_ip_address', field)
-
- field = obj_dict.get('instance_ip_family', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'instance_ip_family', field)
-
- field = obj_dict.get('instance_ip_mode', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'instance_ip_mode', field)
-
- field = obj_dict.get('subnet_uuid', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'subnet_uuid', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('virtual_network_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_network', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'instance_ip', obj_ids['uuid'], 'virtual_network', ref_uuid, ref_data)
- refs = obj_dict.get('virtual_machine_interface_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'instance_ip', obj_ids['uuid'], 'virtual_machine_interface', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('instance_ip', fq_name_cols)
-
- return (True, '')
- #end _cassandra_instance_ip_create
-
- def _cassandra_instance_ip_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # ignoring columns starting from 'b' and 'c' - significant performance
- # impact in scaled setting. e.g. read of project
- if (field_names is None or
- (set(field_names) & (InstanceIp.backref_fields | InstanceIp.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_instance_ip_read
-
- def _cassandra_instance_ip_count_children(self, obj_uuid, child_type):
- # if child_type = None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in InstanceIp.children_fields:
- return (False, '%s is not a valid children of InstanceIp' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_instance_ip_count_children
-
- def _cassandra_instance_ip_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'virtual_network_refs' in new_obj_dict:
- new_ref_infos['virtual_network'] = {}
- new_refs = new_obj_dict['virtual_network_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_network', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_network'][new_ref_uuid] = new_ref_data
-
- if 'virtual_machine_interface_refs' in new_obj_dict:
- new_ref_infos['virtual_machine_interface'] = {}
- new_refs = new_obj_dict['virtual_machine_interface_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_machine_interface'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'instance_ip_address' in new_obj_dict:
- new_props['instance_ip_address'] = new_obj_dict['instance_ip_address']
- if 'instance_ip_family' in new_obj_dict:
- new_props['instance_ip_family'] = new_obj_dict['instance_ip_family']
- if 'instance_ip_mode' in new_obj_dict:
- new_props['instance_ip_mode'] = new_obj_dict['instance_ip_mode']
- if 'subnet_uuid' in new_obj_dict:
- new_props['subnet_uuid'] = new_obj_dict['subnet_uuid']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from request dict(or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'instance_ip', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'instance_ip', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_instance_ip_update
-
- def _cassandra_instance_ip_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:instance_ip:'
- col_fin = 'children:instance_ip;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:instance_ip:'
- col_fin = 'backref:instance_ip;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('instance_ip', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_instance_ip_list
-
- def _cassandra_instance_ip_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'instance_ip', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'instance_ip', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('instance_ip', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_instance_ip_delete
-
- def _cassandra_network_policy_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_network_policy_alloc
-
- def _cassandra_network_policy_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('network_policy')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'network_policy', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('network_policy_entries', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'network_policy_entries', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('network_policy', fq_name_cols)
-
- return (True, '')
- #end _cassandra_network_policy_create
-
- def _cassandra_network_policy_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # ignoring columns starting from 'b' and 'c' - significant performance
- # impact in scaled setting. e.g. read of project
- if (field_names is None or
- (set(field_names) & (NetworkPolicy.backref_fields | NetworkPolicy.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_network_policy_read
-
- def _cassandra_network_policy_count_children(self, obj_uuid, child_type):
- # if child_type = None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in NetworkPolicy.children_fields:
- return (False, '%s is not a valid children of NetworkPolicy' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_network_policy_count_children
-
- def _cassandra_network_policy_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'network_policy_entries' in new_obj_dict:
- new_props['network_policy_entries'] = new_obj_dict['network_policy_entries']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'network_policy', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'network_policy', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_network_policy_update
-
- def _cassandra_network_policy_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
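-        # Three anchoring modes below: walk 'children:network_policy:*'
-        # columns of the given parents, walk 'backref:network_policy:*'
-        # columns of the given anchors, or (with neither) list the exact
-        # obj_uuids given or every network_policy in the fq_name table.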
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:network_policy:'
- col_fin = 'children:network_policy;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:network_policy:'
- col_fin = 'backref:network_policy;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('network_policy', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_network_policy_list
-
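-    # Delete below unlinks the object from its parent ('parent:' columns),
-    # drops its outgoing 'ref:' columns, removes the whole row in one batch
-    # and finally clears the fq_name table entry.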
- def _cassandra_network_policy_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'network_policy', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'network_policy', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('network_policy', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_network_policy_delete
-
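-    # The remaining resource types below repeat the same generated
-    # alloc/create/read/count_children/update/list/delete pattern; only the
-    # type name and the per-type properties and reference types change.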
- def _cassandra_loadbalancer_pool_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_loadbalancer_pool_alloc
-
- def _cassandra_loadbalancer_pool_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('loadbalancer_pool')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'loadbalancer_pool', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('loadbalancer_pool_properties', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'loadbalancer_pool_properties', field)
-
- field = obj_dict.get('loadbalancer_pool_provider', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'loadbalancer_pool_provider', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('service_instance_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('service_instance', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'loadbalancer_pool', obj_ids['uuid'], 'service_instance', ref_uuid, ref_data)
- refs = obj_dict.get('virtual_machine_interface_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'loadbalancer_pool', obj_ids['uuid'], 'virtual_machine_interface', ref_uuid, ref_data)
- refs = obj_dict.get('service_appliance_set_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('service_appliance_set', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'loadbalancer_pool', obj_ids['uuid'], 'service_appliance_set', ref_uuid, ref_data)
- refs = obj_dict.get('loadbalancer_healthmonitor_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('loadbalancer_healthmonitor', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'loadbalancer_pool', obj_ids['uuid'], 'loadbalancer_healthmonitor', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('loadbalancer_pool', fq_name_cols)
-
- return (True, '')
- #end _cassandra_loadbalancer_pool_create
-
- def _cassandra_loadbalancer_pool_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # ignoring columns starting from 'b' and 'c' - significant performance
- # impact in scaled setting. e.g. read of project
- if (field_names is None or
- (set(field_names) & (LoadbalancerPool.backref_fields | LoadbalancerPool.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
- if 'loadbalancer_members' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['loadbalancer_members'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['loadbalancer_members'] = sorted_children
- [child.pop('tstamp') for child in result['loadbalancer_members']]
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_loadbalancer_pool_read
-
- def _cassandra_loadbalancer_pool_count_children(self, obj_uuid, child_type):
- # if child_type = None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in LoadbalancerPool.children_fields:
-            return (False, '%s is not a valid child of LoadbalancerPool' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_loadbalancer_pool_count_children
-
- def _cassandra_loadbalancer_pool_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'service_instance_refs' in new_obj_dict:
- new_ref_infos['service_instance'] = {}
- new_refs = new_obj_dict['service_instance_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('service_instance', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['service_instance'][new_ref_uuid] = new_ref_data
-
- if 'virtual_machine_interface_refs' in new_obj_dict:
- new_ref_infos['virtual_machine_interface'] = {}
- new_refs = new_obj_dict['virtual_machine_interface_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_machine_interface'][new_ref_uuid] = new_ref_data
-
- if 'service_appliance_set_refs' in new_obj_dict:
- new_ref_infos['service_appliance_set'] = {}
- new_refs = new_obj_dict['service_appliance_set_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('service_appliance_set', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['service_appliance_set'][new_ref_uuid] = new_ref_data
-
- if 'loadbalancer_healthmonitor_refs' in new_obj_dict:
- new_ref_infos['loadbalancer_healthmonitor'] = {}
- new_refs = new_obj_dict['loadbalancer_healthmonitor_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('loadbalancer_healthmonitor', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['loadbalancer_healthmonitor'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'loadbalancer_pool_properties' in new_obj_dict:
- new_props['loadbalancer_pool_properties'] = new_obj_dict['loadbalancer_pool_properties']
- if 'loadbalancer_pool_provider' in new_obj_dict:
- new_props['loadbalancer_pool_provider'] = new_obj_dict['loadbalancer_pool_provider']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
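-        # Walk the existing columns: refresh id_perms (and any changed props)
-        # in place and pass existing 'ref:' columns to _update_ref; the new
-        # refs and props gathered above are then written out after the loop.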
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'loadbalancer_pool', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'loadbalancer_pool', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_loadbalancer_pool_update
-
- def _cassandra_loadbalancer_pool_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:loadbalancer_pool:'
- col_fin = 'children:loadbalancer_pool;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:loadbalancer_pool:'
- col_fin = 'backref:loadbalancer_pool;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('loadbalancer_pool', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_loadbalancer_pool_list
-
- def _cassandra_loadbalancer_pool_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'loadbalancer_pool', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'loadbalancer_pool', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('loadbalancer_pool', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_loadbalancer_pool_delete
-
- def _cassandra_virtual_DNS_record_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_virtual_DNS_record_alloc
-
- def _cassandra_virtual_DNS_record_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('virtual_DNS_record')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'virtual_DNS_record', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('virtual_DNS_record_data', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'virtual_DNS_record_data', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('virtual_DNS_record', fq_name_cols)
-
- return (True, '')
- #end _cassandra_virtual_DNS_record_create
-
- def _cassandra_virtual_DNS_record_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # ignoring columns starting from 'b' and 'c' - significant performance
- # impact in scaled setting. e.g. read of project
- if (field_names is None or
- (set(field_names) & (VirtualDnsRecord.backref_fields | VirtualDnsRecord.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_virtual_DNS_record_read
-
- def _cassandra_virtual_DNS_record_count_children(self, obj_uuid, child_type):
- # if child_type = None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in VirtualDnsRecord.children_fields:
-            return (False, '%s is not a valid child of VirtualDnsRecord' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_virtual_DNS_record_count_children
-
- def _cassandra_virtual_DNS_record_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'virtual_DNS_record_data' in new_obj_dict:
- new_props['virtual_DNS_record_data'] = new_obj_dict['virtual_DNS_record_data']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'virtual_DNS_record', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'virtual_DNS_record', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_virtual_DNS_record_update
-
- def _cassandra_virtual_DNS_record_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:virtual_DNS_record:'
- col_fin = 'children:virtual_DNS_record;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:virtual_DNS_record:'
- col_fin = 'backref:virtual_DNS_record;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('virtual_DNS_record', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_virtual_DNS_record_list
-
- def _cassandra_virtual_DNS_record_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'virtual_DNS_record', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'virtual_DNS_record', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('virtual_DNS_record', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_virtual_DNS_record_delete
-
- def _cassandra_route_target_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_route_target_alloc
-
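-    # route_target rows carry no 'parent:' column here: create below only
-    # writes fq_name/type plus the id_perms and display_name props, and adds
-    # the fq_name table entry.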
- def _cassandra_route_target_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('route_target')
-
- # Properties
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('route_target', fq_name_cols)
-
- return (True, '')
- #end _cassandra_route_target_create
-
- def _cassandra_route_target_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # ignoring columns starting from 'b' and 'c' - significant performance
- # impact in scaled setting. e.g. read of project
- if (field_names is None or
- (set(field_names) & (RouteTarget.backref_fields | RouteTarget.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_route_target_read
-
- def _cassandra_route_target_count_children(self, obj_uuid, child_type):
- # if child_type = None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in RouteTarget.children_fields:
-            return (False, '%s is not a valid child of RouteTarget' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_route_target_count_children
-
- def _cassandra_route_target_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'route_target', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'route_target', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_route_target_update
-
- def _cassandra_route_target_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:route_target:'
- col_fin = 'children:route_target;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:route_target:'
- col_fin = 'backref:route_target;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('route_target', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_route_target_list
-
- def _cassandra_route_target_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'route_target', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'route_target', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('route_target', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_route_target_delete
-
- def _cassandra_floating_ip_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_floating_ip_alloc
-
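-    # floating_ip create stores its address/family fields as 'prop:' columns
-    # and its project and virtual_machine_interface refs as
-    # 'ref:<type>:<uuid>' columns carrying an {'attr', 'is_weakref'} payload.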
- def _cassandra_floating_ip_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('floating_ip')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'floating_ip', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('floating_ip_address', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'floating_ip_address', field)
-
- field = obj_dict.get('floating_ip_is_virtual_ip', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'floating_ip_is_virtual_ip', field)
-
- field = obj_dict.get('floating_ip_fixed_ip_address', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'floating_ip_fixed_ip_address', field)
-
- field = obj_dict.get('floating_ip_address_family', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'floating_ip_address_family', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('project_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('project', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'floating_ip', obj_ids['uuid'], 'project', ref_uuid, ref_data)
- refs = obj_dict.get('virtual_machine_interface_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'floating_ip', obj_ids['uuid'], 'virtual_machine_interface', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('floating_ip', fq_name_cols)
-
- return (True, '')
- #end _cassandra_floating_ip_create
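
Putting the pieces of the create above together: the new object becomes one row in obj_uuid_cf whose column names encode the field kind ('prop:', 'ref:', 'parent:'), plus one extra column in the per-type fq_name table. The layout below is only illustrative; the uuids and values are invented, and the exact values written for the 'parent:'/'ref:' columns come from the _create_child/_create_ref helpers, which are not part of this diff.

    import json

    fip_uuid = '4d2a-hypothetical'
    obj_row = {                                   # row key: fip_uuid
        'fq_name': json.dumps(['default-domain', 'demo', 'net1', 'pool1', 'fip1']),
        'type': json.dumps('floating_ip'),
        'parent_type': json.dumps('floating-ip-pool'),
        'parent:floating_ip_pool:7c1e-hypothetical': json.dumps(None),   # assumed value
        'prop:floating_ip_address': json.dumps('10.1.1.3'),              # assumed encoding
        'ref:project:81aa-hypothetical': json.dumps(
            {'attr': None, 'is_weakref': False}),                        # assumed encoding
    }
    # One column per object in the fq_name table row keyed 'floating_ip':
    fq_name_row = {
        'default-domain:demo:net1:pool1:fip1:' + fip_uuid: json.dumps(None),
    }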
-
- def _cassandra_floating_ip_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
-        # optimize for the common case of reading non-backref, non-children fields
-        # by ignoring columns starting with 'b' and 'c' - this has a significant
-        # performance impact in scaled settings, e.g. when reading a project
- if (field_names is None or
- (set(field_names) & (FloatingIp.backref_fields | FloatingIp.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_floating_ip_read
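
The column_start='d' trick in the read above drops 'backref:...' and 'children:...' columns without naming them: column names come back in sorted order, everything starting with 'b' or 'c' sorts before 'd', and 'fq_name', 'parent:', 'prop:', 'ref:' and 'type' all sort after it. A standalone illustration (column names are made up):

    columns = sorted([
        'backref:virtual_machine_interface:aa11',
        'children:some_child_type:bb22',   # hypothetical; shown only for ordering
        'fq_name',
        'parent:floating_ip_pool:cc33',
        'prop:floating_ip_address',
        'ref:project:dd44',
        'type',
    ])

    # Equivalent of multiget(..., column_start='d'):
    print([c for c in columns if c >= 'd'])
    # ['fq_name', 'parent:floating_ip_pool:cc33', 'prop:floating_ip_address',
    #  'ref:project:dd44', 'type']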
-
- def _cassandra_floating_ip_count_children(self, obj_uuid, child_type):
-        # if child_type is None, return failure
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in FloatingIp.children_fields:
-            return (False, '%s is not a valid child of FloatingIp' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_floating_ip_count_children
-
- def _cassandra_floating_ip_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'project_refs' in new_obj_dict:
- new_ref_infos['project'] = {}
- new_refs = new_obj_dict['project_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('project', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['project'][new_ref_uuid] = new_ref_data
-
- if 'virtual_machine_interface_refs' in new_obj_dict:
- new_ref_infos['virtual_machine_interface'] = {}
- new_refs = new_obj_dict['virtual_machine_interface_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_machine_interface'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'floating_ip_address' in new_obj_dict:
- new_props['floating_ip_address'] = new_obj_dict['floating_ip_address']
- if 'floating_ip_is_virtual_ip' in new_obj_dict:
- new_props['floating_ip_is_virtual_ip'] = new_obj_dict['floating_ip_is_virtual_ip']
- if 'floating_ip_fixed_ip_address' in new_obj_dict:
- new_props['floating_ip_fixed_ip_address'] = new_obj_dict['floating_ip_fixed_ip_address']
- if 'floating_ip_address_family' in new_obj_dict:
- new_props['floating_ip_address_family'] = new_obj_dict['floating_ip_address_family']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
-                    # id-perms always has to be updated for the last-modified timestamp;
-                    # take it from the request dict (or from the db if not in the request)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'floating_ip', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'floating_ip', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_floating_ip_update
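
The update above builds two request-side structures before touching Cassandra: new_ref_infos, keyed by ref type and then ref uuid, and new_props, keyed by property name. Existing 'ref:'/'prop:' columns are reconciled against them via _update_ref/_update_prop (not in this diff), and anything still left over is written as a new column. Their shape, with invented values:

    new_ref_infos = {
        'virtual_machine_interface': {
            'ee55-hypothetical': {'attr': None, 'is_weakref': False},
        },
    }
    new_props = {
        'floating_ip_fixed_ip_address': '192.168.0.7',
        'display_name': 'fip1',
    }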
-
- def _cassandra_floating_ip_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:floating_ip:'
- col_fin = 'children:floating_ip;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:floating_ip:'
- col_fin = 'backref:floating_ip;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('floating_ip', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_floating_ip_list
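
Whichever anchor the list above starts from (parent rows, backref rows, explicit uuids, or the whole fq_name table), the optional filters all go through the same filter_rows helper: a candidate survives only if every requested 'prop:<field>' column matches the requested value. A pycassa-free sketch of that matching, assuming values are compared as the stored strings:

    def filter_rows(candidates, rows, filter_fields):
        # candidates: uuid -> anchor-specific info; rows: uuid -> {column: value}
        kept = {}
        for uuid, info in candidates.items():
            cols = rows.get(uuid, {})
            if all(cols.get('prop:%s' % fname) == fval
                   for fname, fval in filter_fields):
                kept[uuid] = info
        return kept

    rows = {'u1': {'prop:floating_ip_address': '"10.1.1.3"'},
            'u2': {'prop:floating_ip_address': '"10.1.1.4"'}}
    print(filter_rows({'u1': None, 'u2': None}, rows,
                      [('floating_ip_address', '"10.1.1.3"')]))
    # {'u1': None}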
-
- def _cassandra_floating_ip_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'floating_ip', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'floating_ip', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('floating_ip', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_floating_ip_delete
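
Delete finishes by dropping the object's single column from the per-type fq_name table; that column name is simply the ':'-joined fq_name with the uuid appended (the real code also passes it through utils.encode_string/decode_string, not shown here). Recovering the pieces, as the no-anchor list path does, is a split:

    fq_name_col = 'default-domain:demo:net1:pool1:fip1:4d2a-hypothetical'

    parts = fq_name_col.split(':')
    fq_name, obj_uuid = parts[:-1], parts[-1]
    print(fq_name)    # ['default-domain', 'demo', 'net1', 'pool1', 'fip1']
    print(obj_uuid)   # '4d2a-hypothetical'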
-
- def _cassandra_floating_ip_pool_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_floating_ip_pool_alloc
-
- def _cassandra_floating_ip_pool_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('floating_ip_pool')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'floating_ip_pool', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('floating_ip_pool_prefixes', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'floating_ip_pool_prefixes', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('floating_ip_pool', fq_name_cols)
-
- return (True, '')
- #end _cassandra_floating_ip_pool_create
-
- def _cassandra_floating_ip_pool_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
-        # optimize for the common case of reading non-backref, non-children fields
-        # by ignoring columns starting with 'b' and 'c' - this has a significant
-        # performance impact in scaled settings, e.g. when reading a project
- if (field_names is None or
- (set(field_names) & (FloatingIpPool.backref_fields | FloatingIpPool.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
- if 'floating_ips' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['floating_ips'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['floating_ips'] = sorted_children
- [child.pop('tstamp') for child in result['floating_ips']]
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_floating_ip_pool_read
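
Child entries arrive in column order, so the read above attaches each child's column timestamp, sorts on it, and strips it again before returning (the 'floating_ips' case here). In isolation, with invented entries standing in for what _read_child assembles:

    from operator import itemgetter

    children = [
        {'uuid': 'c2-hypothetical', 'tstamp': 1490275577000000},
        {'uuid': 'c1-hypothetical', 'tstamp': 1490275576000000},
    ]

    children = sorted(children, key=itemgetter('tstamp'))
    for child in children:
        child.pop('tstamp')
    print([c['uuid'] for c in children])   # ['c1-hypothetical', 'c2-hypothetical']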
-
- def _cassandra_floating_ip_pool_count_children(self, obj_uuid, child_type):
-        # if child_type is None, return failure
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in FloatingIpPool.children_fields:
-            return (False, '%s is not a valid child of FloatingIpPool' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_floating_ip_pool_count_children
-
- def _cassandra_floating_ip_pool_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'floating_ip_pool_prefixes' in new_obj_dict:
- new_props['floating_ip_pool_prefixes'] = new_obj_dict['floating_ip_pool_prefixes']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
-                    # id-perms always has to be updated for the last-modified timestamp;
-                    # take it from the request dict (or from the db if not in the request)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'floating_ip_pool', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'floating_ip_pool', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_floating_ip_pool_update
-
- def _cassandra_floating_ip_pool_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:floating_ip_pool:'
- col_fin = 'children:floating_ip_pool;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:floating_ip_pool:'
- col_fin = 'backref:floating_ip_pool;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('floating_ip_pool', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_floating_ip_pool_list
-
- def _cassandra_floating_ip_pool_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'floating_ip_pool', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'floating_ip_pool', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('floating_ip_pool', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_floating_ip_pool_delete
-
- def _cassandra_physical_router_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_physical_router_alloc
-
- def _cassandra_physical_router_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('physical_router')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'physical_router', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('physical_router_management_ip', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'physical_router_management_ip', field)
-
- field = obj_dict.get('physical_router_dataplane_ip', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'physical_router_dataplane_ip', field)
-
- field = obj_dict.get('physical_router_vendor_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'physical_router_vendor_name', field)
-
- field = obj_dict.get('physical_router_product_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'physical_router_product_name', field)
-
- field = obj_dict.get('physical_router_vnc_managed', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'physical_router_vnc_managed', field)
-
- field = obj_dict.get('physical_router_user_credentials', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'physical_router_user_credentials', field)
-
- field = obj_dict.get('physical_router_snmp_credentials', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'physical_router_snmp_credentials', field)
-
- field = obj_dict.get('physical_router_junos_service_ports', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'physical_router_junos_service_ports', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('virtual_router_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_router', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'physical_router', obj_ids['uuid'], 'virtual_router', ref_uuid, ref_data)
- refs = obj_dict.get('bgp_router_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('bgp_router', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'physical_router', obj_ids['uuid'], 'bgp_router', ref_uuid, ref_data)
- refs = obj_dict.get('virtual_network_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_network', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'physical_router', obj_ids['uuid'], 'virtual_network', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('physical_router', fq_name_cols)
-
- return (True, '')
- #end _cassandra_physical_router_create
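
As in every create above, id_perms is stamped with matching 'created' and 'last_modified' ISO-8601 timestamps before it is persisted; later updates bump only 'last_modified' via update_last_modified (not in this diff). The stamping on its own, with a hypothetical incoming id_perms:

    import datetime

    id_perms = {'enable': True}   # hypothetical incoming id_perms
    id_perms['created'] = datetime.datetime.utcnow().isoformat()
    id_perms['last_modified'] = id_perms['created']
    print(id_perms)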
-
- def _cassandra_physical_router_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
-        # optimize for the common case of reading non-backref, non-children fields
-        # by ignoring columns starting with 'b' and 'c' - this has a significant
-        # performance impact in scaled settings, e.g. when reading a project
- if (field_names is None or
- (set(field_names) & (PhysicalRouter.backref_fields | PhysicalRouter.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
- if 'physical_interfaces' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['physical_interfaces'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['physical_interfaces'] = sorted_children
- [child.pop('tstamp') for child in result['physical_interfaces']]
-
- if 'logical_interfaces' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['logical_interfaces'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['logical_interfaces'] = sorted_children
- [child.pop('tstamp') for child in result['logical_interfaces']]
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_physical_router_read
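
The gevent.sleep(0) calls sprinkled through these loops yield to the gevent hub so ZooKeeper heartbeats and other greenlets are not starved while a large row is processed. A self-contained illustration of the cooperative yield:

    import gevent

    def churn(name, iterations):
        for _ in range(iterations):
            # stand-in for the per-column work in the read/list loops
            gevent.sleep(0)   # yield to the hub so other greenlets can run
        print('%s done' % name)

    gevent.joinall([gevent.spawn(churn, 'reader-%d' % i, 3) for i in range(2)])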
-
- def _cassandra_physical_router_count_children(self, obj_uuid, child_type):
-        # if child_type is None, return failure
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in PhysicalRouter.children_fields:
-            return (False, '%s is not a valid child of PhysicalRouter' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_physical_router_count_children
-
- def _cassandra_physical_router_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'virtual_router_refs' in new_obj_dict:
- new_ref_infos['virtual_router'] = {}
- new_refs = new_obj_dict['virtual_router_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_router', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_router'][new_ref_uuid] = new_ref_data
-
- if 'bgp_router_refs' in new_obj_dict:
- new_ref_infos['bgp_router'] = {}
- new_refs = new_obj_dict['bgp_router_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('bgp_router', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['bgp_router'][new_ref_uuid] = new_ref_data
-
- if 'virtual_network_refs' in new_obj_dict:
- new_ref_infos['virtual_network'] = {}
- new_refs = new_obj_dict['virtual_network_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_network', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_network'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'physical_router_management_ip' in new_obj_dict:
- new_props['physical_router_management_ip'] = new_obj_dict['physical_router_management_ip']
- if 'physical_router_dataplane_ip' in new_obj_dict:
- new_props['physical_router_dataplane_ip'] = new_obj_dict['physical_router_dataplane_ip']
- if 'physical_router_vendor_name' in new_obj_dict:
- new_props['physical_router_vendor_name'] = new_obj_dict['physical_router_vendor_name']
- if 'physical_router_product_name' in new_obj_dict:
- new_props['physical_router_product_name'] = new_obj_dict['physical_router_product_name']
- if 'physical_router_vnc_managed' in new_obj_dict:
- new_props['physical_router_vnc_managed'] = new_obj_dict['physical_router_vnc_managed']
- if 'physical_router_user_credentials' in new_obj_dict:
- new_props['physical_router_user_credentials'] = new_obj_dict['physical_router_user_credentials']
- if 'physical_router_snmp_credentials' in new_obj_dict:
- new_props['physical_router_snmp_credentials'] = new_obj_dict['physical_router_snmp_credentials']
- if 'physical_router_junos_service_ports' in new_obj_dict:
- new_props['physical_router_junos_service_ports'] = new_obj_dict['physical_router_junos_service_ports']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
-                    # id-perms always has to be updated for the last-modified timestamp;
-                    # take it from the request dict (or from the db if not in the request)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'physical_router', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'physical_router', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_physical_router_update
-
- def _cassandra_physical_router_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:physical_router:'
- col_fin = 'children:physical_router;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:physical_router:'
- col_fin = 'backref:physical_router;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('physical_router', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_physical_router_list
-
- def _cassandra_physical_router_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'physical_router', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'physical_router', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('physical_router', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_physical_router_delete
-
- def _cassandra_bgp_router_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_bgp_router_alloc
-
- def _cassandra_bgp_router_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('bgp_router')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'bgp_router', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('bgp_router_parameters', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'bgp_router_parameters', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('bgp_router_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('bgp_router', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'bgp_router', obj_ids['uuid'], 'bgp_router', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('bgp_router', fq_name_cols)
-
- return (True, '')
- #end _cassandra_bgp_router_create
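
As in the other create methods, the parent link is derived purely from the request: the REST-style parent_type is mapped to its method form by swapping '-' for '_', and the parent's fq_name is the child's fq_name minus its last element. With invented values:

    obj_dict = {
        'fq_name': ['default-domain', 'demo', 'router1'],
        'parent_type': 'routing-instance',   # hypothetical parent type
    }

    parent_method_type = obj_dict['parent_type'].replace('-', '_')   # 'routing_instance'
    parent_fq_name = obj_dict['fq_name'][:-1]                        # ['default-domain', 'demo']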
-
- def _cassandra_bgp_router_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
-        # optimize for the common case of reading non-backref, non-children fields
-        # by ignoring columns starting with 'b' and 'c' - this has a significant
-        # performance impact in scaled settings, e.g. when reading a project
- if (field_names is None or
- (set(field_names) & (BgpRouter.backref_fields | BgpRouter.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_bgp_router_read
-
- def _cassandra_bgp_router_count_children(self, obj_uuid, child_type):
-        # if child_type is None, return failure
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in BgpRouter.children_fields:
-            return (False, '%s is not a valid child of BgpRouter' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_bgp_router_count_children
-
- def _cassandra_bgp_router_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'bgp_router_refs' in new_obj_dict:
- new_ref_infos['bgp_router'] = {}
- new_refs = new_obj_dict['bgp_router_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('bgp_router', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['bgp_router'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'bgp_router_parameters' in new_obj_dict:
- new_props['bgp_router_parameters'] = new_obj_dict['bgp_router_parameters']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
-                    # id-perms always has to be updated for the last-modified timestamp;
-                    # take it from the request dict (or from the db if not in the request)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'bgp_router', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'bgp_router', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_bgp_router_update
-
- def _cassandra_bgp_router_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:bgp_router:'
- col_fin = 'children:bgp_router;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:bgp_router:'
- col_fin = 'backref:bgp_router;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('bgp_router', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_bgp_router_list
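
Throughout these list methods the filters argument is a pair of parallel lists, 'field_names' and 'field_values', zipped into (name, value) tuples; filter_rows then keeps only the rows whose stored 'prop:<name>' column equals the requested value exactly. A rough in-memory sketch of that matching step, with made-up rows standing in for the multiget result (the quoted values reflect that property columns appear to be stored JSON-encoded):

    # Hypothetical rows keyed by uuid; inner dicts mimic 'prop:<name>' columns.
    coll_rows = {
        'uuid-1': {'prop:display_name': '"router-a"'},
        'uuid-2': {'prop:display_name': '"router-b"'},
    }
    coll_infos = {'uuid-1': {'uuid': 'uuid-1'}, 'uuid-2': {'uuid': 'uuid-2'}}
    filter_params = [('display_name', '"router-a"')]

    filt_infos = {}
    for row, cols in coll_rows.items():
        # keep the row only if every (field, value) pair matches exactly
        if all(cols.get('prop:%s' % fname) == fval for fname, fval in filter_params):
            filt_infos[row] = coll_infos[row]
    print(sorted(filt_infos))   # -> ['uuid-1']
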
-
- def _cassandra_bgp_router_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'bgp_router', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'bgp_router', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('bgp_router', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_bgp_router_delete
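
The delete above also cleans up the second table, _obj_fq_name_cf, which the create path populated: each object type is a row key, and every object of that type contributes one column named '<fq-name joined by colons>:<uuid>' whose value is a JSON null (the real code additionally passes the string through utils.encode_string). A small sketch with placeholder fq_name and uuid values:

    import json

    fq_name = ['default-domain', 'default-project', 'router-1']   # placeholder
    obj_uuid = '0000-1111'                                         # placeholder

    # column written at create time: '<fqn>:<uuid>' -> 'null'
    fq_name_col = ':'.join(fq_name) + ':' + obj_uuid
    row = {fq_name_col: json.dumps(None)}
    print(row)    # {'default-domain:default-project:router-1:0000-1111': 'null'}

    # delete removes just that one column from the type's row
    row.pop(fq_name_col, None)
    print(row)    # {}
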
-
- def _cassandra_virtual_router_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_virtual_router_alloc
-
- def _cassandra_virtual_router_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('virtual_router')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'virtual_router', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('virtual_router_type', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'virtual_router_type', field)
-
- field = obj_dict.get('virtual_router_ip_address', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'virtual_router_ip_address', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('bgp_router_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('bgp_router', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'virtual_router', obj_ids['uuid'], 'bgp_router', ref_uuid, ref_data)
- refs = obj_dict.get('virtual_machine_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_machine', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'virtual_router', obj_ids['uuid'], 'virtual_machine', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('virtual_router', fq_name_cols)
-
- return (True, '')
- #end _cassandra_virtual_router_create
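
Taken together, the create path leaves a single row per object in _obj_uuid_cf, with JSON-encoded values under a handful of column-name prefixes that the read/update/delete methods below key off ('children:' and 'backref:' columns live on the parent row and on the referenced object's row, not here). The sketch is only a rough picture with invented UUIDs and values; the exact encodings for the 'parent:' and 'ref:' columns are owned by the _create_child/_create_ref helpers, which are not shown in this hunk:

    import json

    # Illustrative only -- a freshly created virtual-router row might look like this.
    row = {
        'fq_name': json.dumps(['default-global-system-config', 'vrouter-1']),
        'type': json.dumps('virtual_router'),
        'parent_type': json.dumps('global-system-config'),
        'parent:global_system_config:<parent-uuid>': json.dumps(None),   # guess at value
        'prop:virtual_router_ip_address': json.dumps('10.0.0.1'),
        'prop:id_perms': json.dumps({'enable': True}),
        'ref:bgp_router:<ref-uuid>': json.dumps({'attr': None,
                                                 'is_weakref': False}),  # guess at value
    }
    for name in sorted(row):
        print(name)
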
-
- def _cassandra_virtual_router_read(self, obj_uuids, field_names = None):
- # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for the common case of reading non-backref, non-children fields:
- # skipping columns that start with 'b' and 'c' gives a significant performance
- # gain in scaled settings, e.g. when reading a project
- if (field_names is None or
- (set(field_names) & (VirtualRouter.backref_fields | VirtualRouter.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_virtual_router_read
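
The column_start='d' branch above is the optimization the comment hints at: every backref column begins with 'backref:' and every child column with 'children:', so starting the scan at 'd' drops both families in a single range restriction while still returning 'fq_name', 'parent:*', 'prop:*', 'ref:*' and 'type'. A tiny sketch of the same idea over a sorted list of invented column names:

    all_columns = sorted([
        'backref:virtual_machine_interface:aaaa',
        'children:virtual_machine:bbbb',
        'fq_name',
        'parent:global_system_config:cccc',
        'prop:id_perms',
        'ref:bgp_router:dddd',
        'type',
    ])
    # multiget(column_start='d') effectively applies this range restriction:
    kept = [c for c in all_columns if c >= 'd']
    print(kept)
    # -> ['fq_name', 'parent:global_system_config:cccc', 'prop:id_perms',
    #     'ref:bgp_router:dddd', 'type']
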
-
- def _cassandra_virtual_router_count_children(self, obj_uuid, child_type):
- # if child_type is None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in VirtualRouter.children_fields:
- return (False, '%s is not a valid child of VirtualRouter' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_virtual_router_count_children
-
- def _cassandra_virtual_router_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'bgp_router_refs' in new_obj_dict:
- new_ref_infos['bgp_router'] = {}
- new_refs = new_obj_dict['bgp_router_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('bgp_router', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['bgp_router'][new_ref_uuid] = new_ref_data
-
- if 'virtual_machine_refs' in new_obj_dict:
- new_ref_infos['virtual_machine'] = {}
- new_refs = new_obj_dict['virtual_machine_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_machine', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_machine'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'virtual_router_type' in new_obj_dict:
- new_props['virtual_router_type'] = new_obj_dict['virtual_router_type']
- if 'virtual_router_ip_address' in new_obj_dict:
- new_props['virtual_router_ip_address'] = new_obj_dict['virtual_router_ip_address']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'virtual_router', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'virtual_router', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_virtual_router_update
-
- def _cassandra_virtual_router_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:virtual_router:'
- col_fin = 'children:virtual_router;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:virtual_router:'
- col_fin = 'backref:virtual_router;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('virtual_router', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_virtual_router_list
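
The gevent.sleep(0) calls sprinkled through these loops (commented "give chance for zk heartbeat/ping") are cooperative yields: the multiget results are walked in tight loops, and a zero-second sleep hands control back to the gevent hub so other greenlets, such as the ZooKeeper session keepalive, get to run. A minimal sketch, assuming gevent is installed:

    import gevent

    def walk(rows):
        for _ in rows:
            gevent.sleep(0)    # yield to the hub; lets other greenlets run
            # ... per-row work would go here ...

    def heartbeat():
        for _ in range(3):
            print('heartbeat')
            gevent.sleep(0)

    gevent.joinall([gevent.spawn(walk, range(1000)),
                    gevent.spawn(heartbeat)])
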
-
- def _cassandra_virtual_router_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'virtual_router', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'virtual_router', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('virtual_router', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_virtual_router_delete
-
- def _cassandra_config_root_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_config_root_alloc
-
- def _cassandra_config_root_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('config_root')
-
- # Properties
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('config_root', fq_name_cols)
-
- return (True, '')
- #end _cassandra_config_root_create
-
- def _cassandra_config_root_read(self, obj_uuids, field_names = None):
- # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for the common case of reading non-backref, non-children fields:
- # skipping columns that start with 'b' and 'c' gives a significant performance
- # gain in scaled settings, e.g. when reading a project
- if (field_names is None or
- (set(field_names) & (ConfigRoot.backref_fields | ConfigRoot.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
- if 'global_system_configs' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['global_system_configs'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['global_system_configs'] = sorted_children
- [child.pop('tstamp') for child in result['global_system_configs']]
-
- if 'domains' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['domains'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['domains'] = sorted_children
- [child.pop('tstamp') for child in result['domains']]
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_config_root_read
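
The post-processing above is why the earlier loop asked multiget for timestamps: each child entry carries a 'tstamp' taken from the column timestamp, the list is sorted on it as a stand-in for schema-defined ordering (per the TODO), and the key is stripped before the result is returned. In miniature, with made-up entries shaped the way _read_child appears to accumulate them:

    from operator import itemgetter

    children = [
        {'uuid': 'b-uuid', 'tstamp': 200},
        {'uuid': 'a-uuid', 'tstamp': 100},
    ]
    sorted_children = sorted(children, key=itemgetter('tstamp'))
    for child in sorted_children:
        child.pop('tstamp')
    print([c['uuid'] for c in sorted_children])   # -> ['a-uuid', 'b-uuid']
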
-
- def _cassandra_config_root_count_children(self, obj_uuid, child_type):
- # if child_type is None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in ConfigRoot.children_fields:
- return (False, '%s is not a valid child of ConfigRoot' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_config_root_count_children
-
- def _cassandra_config_root_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'config_root', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'config_root', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_config_root_update
-
- def _cassandra_config_root_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:config_root:'
- col_fin = 'children:config_root;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:config_root:'
- col_fin = 'backref:config_root;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('config_root', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_config_root_list
-
- def _cassandra_config_root_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'config_root', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'config_root', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('config_root', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_config_root_delete
-
- def _cassandra_subnet_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_subnet_alloc
-
- def _cassandra_subnet_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('subnet')
-
- # Properties
- field = obj_dict.get('subnet_ip_prefix', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'subnet_ip_prefix', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('virtual_machine_interface_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'subnet', obj_ids['uuid'], 'virtual_machine_interface', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('subnet', fq_name_cols)
-
- return (True, '')
- #end _cassandra_subnet_create
-
- def _cassandra_subnet_read(self, obj_uuids, field_names = None):
- # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for the common case of reading non-backref, non-children fields:
- # skipping columns that start with 'b' and 'c' gives a significant performance
- # gain in scaled settings, e.g. when reading a project
- if (field_names is None or
- (set(field_names) & (Subnet.backref_fields | Subnet.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_subnet_read
-
- def _cassandra_subnet_count_children(self, obj_uuid, child_type):
- # if child_type is None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in Subnet.children_fields:
- return (False, '%s is not a valid child of Subnet' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_subnet_count_children
-
- def _cassandra_subnet_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'virtual_machine_interface_refs' in new_obj_dict:
- new_ref_infos['virtual_machine_interface'] = {}
- new_refs = new_obj_dict['virtual_machine_interface_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_machine_interface'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'subnet_ip_prefix' in new_obj_dict:
- new_props['subnet_ip_prefix'] = new_obj_dict['subnet_ip_prefix']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'subnet', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'subnet', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_subnet_update
-
- def _cassandra_subnet_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:subnet:'
- col_fin = 'children:subnet;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:subnet:'
- col_fin = 'backref:subnet;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('subnet', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_subnet_list
-
- def _cassandra_subnet_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'subnet', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'subnet', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('subnet', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_subnet_delete
-
- def _cassandra_global_system_config_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_global_system_config_alloc
-
- def _cassandra_global_system_config_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('global_system_config')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'global_system_config', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('autonomous_system', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'autonomous_system', field)
-
- field = obj_dict.get('config_version', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'config_version', field)
-
- field = obj_dict.get('plugin_tuning', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'plugin_tuning', field)
-
- field = obj_dict.get('ibgp_auto_mesh', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'ibgp_auto_mesh', field)
-
- field = obj_dict.get('ip_fabric_subnets', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'ip_fabric_subnets', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('bgp_router_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('bgp_router', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'global_system_config', obj_ids['uuid'], 'bgp_router', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('global_system_config', fq_name_cols)
-
- return (True, '')
- #end _cassandra_global_system_config_create
-
- def _cassandra_global_system_config_read(self, obj_uuids, field_names = None):
- # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for the common case of reading non-backref, non-children fields:
- # skipping columns that start with 'b' and 'c' gives a significant performance
- # gain in scaled settings, e.g. when reading a project
- if (field_names is None or
- (set(field_names) & (GlobalSystemConfig.backref_fields | GlobalSystemConfig.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
- if 'global_vrouter_configs' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['global_vrouter_configs'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['global_vrouter_configs'] = sorted_children
- [child.pop('tstamp') for child in result['global_vrouter_configs']]
-
- if 'physical_routers' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['physical_routers'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['physical_routers'] = sorted_children
- [child.pop('tstamp') for child in result['physical_routers']]
-
- if 'virtual_routers' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['virtual_routers'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['virtual_routers'] = sorted_children
- [child.pop('tstamp') for child in result['virtual_routers']]
-
- if 'config_nodes' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['config_nodes'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['config_nodes'] = sorted_children
- [child.pop('tstamp') for child in result['config_nodes']]
-
- if 'analytics_nodes' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['analytics_nodes'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['analytics_nodes'] = sorted_children
- [child.pop('tstamp') for child in result['analytics_nodes']]
-
- if 'database_nodes' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['database_nodes'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['database_nodes'] = sorted_children
- [child.pop('tstamp') for child in result['database_nodes']]
-
- if 'service_appliance_sets' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['service_appliance_sets'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['service_appliance_sets'] = sorted_children
- [child.pop('tstamp') for child in result['service_appliance_sets']]
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_global_system_config_read
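The read path above depends on the lexical layout of the column names: 'backref:' and 'children:' sort before 'd', so when no backref or children fields are requested the slice can start at 'd' and skip them entirely, and children that are read carry a 'tstamp' used for ordering and then dropped. A small self-contained illustration of both ideas, using plain dicts rather than pycassa:

from operator import itemgetter

columns = {
    'backref:virtual_network:1111': '...',
    'children:physical_router:2222': '...',
    'fq_name': '["default-global-system-config"]',
    'prop:autonomous_system': '64512',
    'ref:bgp_router:3333': '...',
}

# slicing from column_start='d' drops backref:/children: columns,
# because 'b...' < 'c...' < 'd' in lexical order
trimmed = {k: v for k, v in columns.items() if k >= 'd'}
assert not any(k.startswith(('backref:', 'children:')) for k in trimmed)

# children are collected with a timestamp, sorted, then the tstamp removed
children = [{'uuid': 'b', 'tstamp': 20}, {'uuid': 'a', 'tstamp': 10}]
children = sorted(children, key=itemgetter('tstamp'))
for child in children:
    child.pop('tstamp')
print(children)   # [{'uuid': 'a'}, {'uuid': 'b'}]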
-
- def _cassandra_global_system_config_count_children(self, obj_uuid, child_type):
- # if child_type is None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in GlobalSystemConfig.children_fields:
- return (False, '%s is not a valid child of GlobalSystemConfig' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_global_system_config_count_children
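count_children above counts columns in the range ['children:<type>:', 'children:<type>;'): ';' is the ASCII character right after ':', so the range covers exactly the columns of that one child type. A standalone sketch of the same range arithmetic:

columns = [
    'children:physical_router:aaaa',
    'children:physical_router:bbbb',
    'children:virtual_router:cccc',
    'prop:display_name',
]

child_type = 'physical_routers'            # plural field name, as in the code above
col_start = 'children:' + child_type[:-1] + ':'
col_finish = 'children:' + child_type[:-1] + ';'   # ';' == chr(ord(':') + 1)

num_children = sum(1 for c in columns if col_start <= c < col_finish)
print(num_children)   # 2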
-
- def _cassandra_global_system_config_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'bgp_router_refs' in new_obj_dict:
- new_ref_infos['bgp_router'] = {}
- new_refs = new_obj_dict['bgp_router_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('bgp_router', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['bgp_router'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'autonomous_system' in new_obj_dict:
- new_props['autonomous_system'] = new_obj_dict['autonomous_system']
- if 'config_version' in new_obj_dict:
- new_props['config_version'] = new_obj_dict['config_version']
- if 'plugin_tuning' in new_obj_dict:
- new_props['plugin_tuning'] = new_obj_dict['plugin_tuning']
- if 'ibgp_auto_mesh' in new_obj_dict:
- new_props['ibgp_auto_mesh'] = new_obj_dict['ibgp_auto_mesh']
- if 'ip_fabric_subnets' in new_obj_dict:
- new_props['ip_fabric_subnets'] = new_obj_dict['ip_fabric_subnets']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from the request dict (or from the db if not in the request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'global_system_config', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'global_system_config', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_global_system_config_update
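Several branches above carry the comment "TODO use compiled RE": re.match('prop:', ...) and re.match('ref:', ...) recompile the pattern for every column of every row. A hedged sketch of the pre-compiled dispatch the TODO presumably intends; classify_column is an illustrative name, not a helper from this file.

import re

# compiled once, reused for every column of every row
_RE_PROP = re.compile(r'^prop:')
_RE_REF = re.compile(r'^ref:(?P<ref_type>[^:]+):(?P<ref_uuid>.+)$')

def classify_column(col_name):
    """Return ('prop', name), ('ref', type, uuid), or None."""
    if _RE_PROP.match(col_name):
        _, prop_name = col_name.split(':')
        return ('prop', prop_name)
    m = _RE_REF.match(col_name)
    if m:
        return ('ref', m.group('ref_type'), m.group('ref_uuid'))
    return None

print(classify_column('prop:id_perms'))          # ('prop', 'id_perms')
print(classify_column('ref:bgp_router:1234'))    # ('ref', 'bgp_router', '1234')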
-
- def _cassandra_global_system_config_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:global_system_config:'
- col_fin = 'children:global_system_config;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:global_system_config:'
- col_fin = 'backref:global_system_config;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('global_system_config', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_global_system_config_list
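filter_rows above keeps a row only if every requested 'prop:<field>' column equals the corresponding filter value; a single mismatch drops the row. A dict-based sketch of that exact-match semantics, with hypothetical row data:

coll_rows = {
    'uuid-1': {'prop:display_name': '"gsc-a"'},
    'uuid-2': {'prop:display_name': '"gsc-b"'},
}
coll_infos = {'uuid-1': {'uuid': 'uuid-1'}, 'uuid-2': {'uuid': 'uuid-2'}}
filter_params = [('display_name', '"gsc-a"')]

filt_infos = {}
for row, row_cols in coll_rows.items():
    # every filter field must match exactly, as in filter_rows above
    full_match = all(row_cols.get('prop:%s' % fname) == fval
                     for fname, fval in filter_params)
    if full_match:
        filt_infos[row] = coll_infos[row]

print(list(filt_infos))   # ['uuid-1']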
-
- def _cassandra_global_system_config_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'global_system_config', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'global_system_config', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('global_system_config', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_global_system_config_delete
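The delete path above walks the 'parent:' and 'ref:' column ranges to undo links, removes the object's row, and finally drops its column from the fq_name index row, whose columns are named '<fq-name-str>:<uuid>'. A minimal sketch of that last index-cleanup step:

obj_fq_name_cf = {
    'global_system_config': {
        'default-global-system-config:1111-2222': 'null',
    },
}

obj_uuid = '1111-2222'
fq_name = ['default-global-system-config']
fq_name_col = ':'.join(fq_name) + ':' + obj_uuid

# drop the index column after the object row itself has been removed
obj_fq_name_cf['global_system_config'].pop(fq_name_col, None)
print(obj_fq_name_cf)   # {'global_system_config': {}}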
-
- def _cassandra_service_appliance_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_service_appliance_alloc
-
- def _cassandra_service_appliance_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('service_appliance')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'service_appliance', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('service_appliance_user_credentials', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'service_appliance_user_credentials', field)
-
- field = obj_dict.get('service_appliance_ip_address', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'service_appliance_ip_address', field)
-
- field = obj_dict.get('service_appliance_properties', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'service_appliance_properties', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('service_appliance', fq_name_cols)
-
- return (True, '')
- #end _cassandra_service_appliance_create
-
- def _cassandra_service_appliance_read(self, obj_uuids, field_names = None):
- # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # ignoring columns starting from 'b' and 'c' - significant performance
- # impact in scaled setting. e.g. read of project
- if (field_names is None or
- (set(field_names) & (ServiceAppliance.backref_fields | ServiceAppliance.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_service_appliance_read
-
- def _cassandra_service_appliance_count_children(self, obj_uuid, child_type):
- # if child_type is None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in ServiceAppliance.children_fields:
- return (False, '%s is not a valid child of ServiceAppliance' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_service_appliance_count_children
-
- def _cassandra_service_appliance_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'service_appliance_user_credentials' in new_obj_dict:
- new_props['service_appliance_user_credentials'] = new_obj_dict['service_appliance_user_credentials']
- if 'service_appliance_ip_address' in new_obj_dict:
- new_props['service_appliance_ip_address'] = new_obj_dict['service_appliance_ip_address']
- if 'service_appliance_properties' in new_obj_dict:
- new_props['service_appliance_properties'] = new_obj_dict['service_appliance_properties']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from the request dict (or from the db if not in the request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'service_appliance', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'service_appliance', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_service_appliance_update
-
- def _cassandra_service_appliance_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:service_appliance:'
- col_fin = 'children:service_appliance;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:service_appliance:'
- col_fin = 'backref:service_appliance;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('service_appliance', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_service_appliance_list
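The list method above has three entry points: anchored at a parent (scan 'children:<type>:' columns on the parent rows), anchored at a back-ref target (scan 'backref:<type>:' columns), or unanchored (scan the type's row in the fq_name table). A compact sketch of how each mode selects its source; list_source and the returned tuples are illustrative only:

def list_source(obj_type, parent_uuids=None, back_ref_uuids=None):
    """Pick where the listing comes from, mirroring the branching above."""
    if parent_uuids:
        return ('obj_uuid_cf', parent_uuids,
                'children:%s:' % obj_type, 'children:%s;' % obj_type)
    if back_ref_uuids:
        return ('obj_uuid_cf', back_ref_uuids,
                'backref:%s:' % obj_type, 'backref:%s;' % obj_type)
    # no anchor: read every '<fq-name-str>:<uuid>' column of the type's row
    return ('obj_fq_name_cf', [obj_type], None, None)

print(list_source('service_appliance', parent_uuids=['parent-uuid']))
print(list_source('service_appliance'))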
-
- def _cassandra_service_appliance_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'service_appliance', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'service_appliance', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('service_appliance', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_service_appliance_delete
-
- def _cassandra_service_instance_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_service_instance_alloc
-
- def _cassandra_service_instance_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('service_instance')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'service_instance', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('service_instance_properties', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'service_instance_properties', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('service_template_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('service_template', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'service_instance', obj_ids['uuid'], 'service_template', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('service_instance', fq_name_cols)
-
- return (True, '')
- #end _cassandra_service_instance_create
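_create_ref above receives the source type and uuid, the target type and uuid, and a small payload holding the reference attribute and a weak-ref flag. The helper itself is not part of this diff, so the sketch below is an assumption about the forward/backward column pair such a call plausibly writes:

import json

obj_uuid_cf = {'si-uuid': {}, 'st-uuid': {}}

def create_ref(cf, obj_type, obj_uuid, ref_type, ref_uuid, ref_data):
    # forward link stored on the referring object ...
    cf[obj_uuid]['ref:%s:%s' % (ref_type, ref_uuid)] = json.dumps(ref_data)
    # ... and an assumed reverse link on the referred-to object
    cf[ref_uuid]['backref:%s:%s' % (obj_type, obj_uuid)] = json.dumps(ref_data)

create_ref(obj_uuid_cf, 'service_instance', 'si-uuid',
           'service_template', 'st-uuid',
           {'attr': None, 'is_weakref': False})
print(sorted(obj_uuid_cf['si-uuid']))   # ['ref:service_template:st-uuid']
print(sorted(obj_uuid_cf['st-uuid']))   # ['backref:service_instance:si-uuid']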
-
- def _cassandra_service_instance_read(self, obj_uuids, field_names = None):
- # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # ignoring columns starting from 'b' and 'c' - significant performance
- # impact in scaled setting. e.g. read of project
- if (field_names is None or
- (set(field_names) & (ServiceInstance.backref_fields | ServiceInstance.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_service_instance_read
-
- def _cassandra_service_instance_count_children(self, obj_uuid, child_type):
- # if child_type is None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in ServiceInstance.children_fields:
- return (False, '%s is not a valid child of ServiceInstance' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_service_instance_count_children
-
- def _cassandra_service_instance_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'service_template_refs' in new_obj_dict:
- new_ref_infos['service_template'] = {}
- new_refs = new_obj_dict['service_template_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('service_template', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['service_template'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'service_instance_properties' in new_obj_dict:
- new_props['service_instance_properties'] = new_obj_dict['service_instance_properties']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from the request dict (or from the db if not in the request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'service_instance', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'service_instance', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_service_instance_update
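On update, id_perms is special-cased above: it is always rewritten so its last-modified timestamp advances, whether or not the caller supplied a new value. update_last_modified is not shown in this diff, so the body below is only a plausible sketch of that behaviour:

import datetime
import json

def refresh_last_modified(stored_id_perms_json, requested_id_perms=None):
    """Take id_perms from the request if present, else from the db row,
    and stamp a fresh last_modified either way."""
    id_perms = requested_id_perms or json.loads(stored_id_perms_json)
    id_perms['last_modified'] = datetime.datetime.utcnow().isoformat()
    return id_perms

stored = json.dumps({'created': '2017-01-01T00:00:00', 'enable': True})
print(refresh_last_modified(stored))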
-
- def _cassandra_service_instance_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:service_instance:'
- col_fin = 'children:service_instance;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:service_instance:'
- col_fin = 'backref:service_instance;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('service_instance', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_service_instance_list
-
- def _cassandra_service_instance_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'service_instance', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'service_instance', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('service_instance', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_service_instance_delete
-
- def _cassandra_namespace_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_namespace_alloc
-
- def _cassandra_namespace_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('namespace')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'namespace', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('namespace_cidr', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'namespace_cidr', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('namespace', fq_name_cols)
-
- return (True, '')
- #end _cassandra_namespace_create
-
- def _cassandra_namespace_read(self, obj_uuids, field_names = None):
- # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # ignoring columns starting from 'b' and 'c' - significant performance
- # impact in scaled setting. e.g. read of project
- if (field_names is None or
- (set(field_names) & (Namespace.backref_fields | Namespace.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_namespace_read
-
- def _cassandra_namespace_count_children(self, obj_uuid, child_type):
- # if child_type is None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in Namespace.children_fields:
- return (False, '%s is not a valid child of Namespace' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_namespace_count_children
-
- def _cassandra_namespace_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'namespace_cidr' in new_obj_dict:
- new_props['namespace_cidr'] = new_obj_dict['namespace_cidr']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from the request dict (or from the db if not in the request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'namespace', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'namespace', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_namespace_update
-
- def _cassandra_namespace_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:namespace:'
- col_fin = 'children:namespace;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:namespace:'
- col_fin = 'backref:namespace;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('namespace', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_namespace_list
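Every long scan above calls gevent.sleep(0) so that other greenlets, such as the ZooKeeper heartbeat the comments mention, get a turn between rows. A tiny self-contained example of that cooperative-yield pattern:

import gevent

def long_scan(rows):
    for row in rows:
        gevent.sleep(0)          # yield to other greenlets, e.g. heartbeats
        # ... process row ...

def heartbeat():
    for _ in range(3):
        print('ping')
        gevent.sleep(0)

gevent.joinall([
    gevent.spawn(long_scan, range(3)),
    gevent.spawn(heartbeat),
])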
-
- def _cassandra_namespace_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'namespace', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'namespace', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('namespace', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_namespace_delete
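
The 'parent:'/'parent;' and 'ref:'/'ref;' bounds used above exploit ASCII ordering: ';' sorts immediately after ':', so a column_start/column_finish pair of '<prefix>:' and '<prefix>;' covers exactly the columns whose names begin with '<prefix>:'. A minimal sketch of the same pattern, using the pycassa xget() call already relied on above (the helper name is illustrative):

# Minimal sketch of the column-range trick: slice from '<prefix>:' to
# '<prefix>;' to enumerate every column whose name starts with '<prefix>:'.
def iter_prefixed_columns(obj_uuid_cf, obj_uuid, prefix):
    col_start = prefix + ':'                 # e.g. 'ref:'
    col_fin = prefix + ';'                   # chr(ord(':') + 1) == ';'
    # pycassa's xget() yields (column_name, column_value) in comparator order,
    # so everything between 'ref:' and 'ref;' shares the 'ref:' prefix.
    for col_name, col_val in obj_uuid_cf.xget(obj_uuid,
                                              column_start=col_start,
                                              column_finish=col_fin):
        yield col_name.split(':'), col_val
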
-
- def _cassandra_logical_interface_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_logical_interface_alloc
-
- def _cassandra_logical_interface_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('logical_interface')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'logical_interface', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('logical_interface_vlan_tag', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'logical_interface_vlan_tag', field)
-
- field = obj_dict.get('logical_interface_type', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'logical_interface_type', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('virtual_machine_interface_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'logical_interface', obj_ids['uuid'], 'virtual_machine_interface', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('logical_interface', fq_name_cols)
-
- return (True, '')
- #end _cassandra_logical_interface_create
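
Besides the object row keyed by uuid, create also indexes the object in the fq_name table: the row key is the object type, and each column name packs the colon-joined fq_name together with the uuid, with a JSON null as the value. A small sketch of that encoding and of how the list/delete paths above unpack it (helper names and the example fq_name are illustrative; encode_string/decode_string are the cfgm_common utils helpers used above):

from cfgm_common import utils   # same helpers referenced in the code above

def encode_fq_name_col(fq_name, obj_uuid):
    # column name: '<fq_name joined with ":">:<uuid>', value: JSON null
    return utils.encode_string(':'.join(fq_name)) + ':' + obj_uuid

def decode_fq_name_col(col_name):
    parts = utils.decode_string(col_name).split(':')
    return parts[:-1], parts[-1]             # (fq_name as a list, uuid)

col = encode_fq_name_col(['default-global-system-config', 'router1',
                          'ge-0/0/0', 'ge-0/0/0.0'], 'some-uuid')
# decode_fq_name_col(col) -> (['default-global-system-config', 'router1',
#                              'ge-0/0/0', 'ge-0/0/0.0'], 'some-uuid')
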
-
- def _cassandra_logical_interface_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
-        # optimize for the common case of reading non-backref, non-children fields:
-        # ignore columns starting with 'b' and 'c' - this has a significant
-        # performance impact in scaled settings, e.g. a read of a project
- if (field_names is None or
- (set(field_names) & (LogicalInterface.backref_fields | LogicalInterface.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_logical_interface_read
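
The column_start='d' slice above is what implements the optimization comment: column names compare lexicographically, so 'backref:*' and 'children:*' sort before 'd' and are skipped, while 'fq_name', 'parent:*', 'parent_type', 'prop:*', 'ref:*' and 'type' all sort after it and are still returned. A small illustration with generic placeholder names:

# Which columns survive a column_start='d' slice, by lexicographic order.
cols = ['backref:a_type:a_uuid', 'children:a_type:a_uuid', 'fq_name',
        'parent:a_type:a_uuid', 'parent_type', 'prop:id_perms',
        'ref:a_type:a_uuid', 'type']
print([c for c in cols if c >= 'd'])
# ['fq_name', 'parent:a_type:a_uuid', 'parent_type',
#  'prop:id_perms', 'ref:a_type:a_uuid', 'type']
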
-
- def _cassandra_logical_interface_count_children(self, obj_uuid, child_type):
-        # if child_type is None, return an error
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in LogicalInterface.children_fields:
-            return (False, '%s is not a valid child of LogicalInterface' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_logical_interface_count_children
-
- def _cassandra_logical_interface_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'virtual_machine_interface_refs' in new_obj_dict:
- new_ref_infos['virtual_machine_interface'] = {}
- new_refs = new_obj_dict['virtual_machine_interface_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_machine_interface'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'logical_interface_vlan_tag' in new_obj_dict:
- new_props['logical_interface_vlan_tag'] = new_obj_dict['logical_interface_vlan_tag']
- if 'logical_interface_type' in new_obj_dict:
- new_props['logical_interface_type'] = new_obj_dict['logical_interface_type']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from the request dict (or from the db if not in the request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'logical_interface', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'logical_interface', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_logical_interface_update
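
A hedged usage sketch of the update path above (the uuid and values below are hypothetical, and db stands for the Cassandra DB client instance): only properties present in new_obj_dict are rewritten, id_perms is always refreshed so its last_modified timestamp moves, and a virtual_machine_interface_refs list is reconciled against the existing 'ref:' columns via _update_ref/_create_ref.

# Hypothetical call; 'db' is the client object exposing these methods.
ok, err = db._cassandra_logical_interface_update(
    'some-logical-interface-uuid',
    {'logical_interface_vlan_tag': 100,
     'virtual_machine_interface_refs': [
         {'to': ['default-domain', 'default-project', 'vmi-1'], 'attr': None}]})
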
-
- def _cassandra_logical_interface_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:logical_interface:'
- col_fin = 'children:logical_interface;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
-                # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:logical_interface:'
- col_fin = 'backref:logical_interface;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
-                # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('logical_interface', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_logical_interface_list
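
The list method above resolves at most one anchor: parent_uuids walks the parents' 'children:logical_interface:*' columns (results ordered by column timestamp), back_ref_uuids walks 'backref:logical_interface:*' columns, and with neither anchor it either takes the explicit obj_uuids or slices the whole 'logical_interface' row of the fq_name table. Filters arrive as parallel field_names/field_values lists and are compared verbatim against the stored 'prop:<name>' columns. A hedged usage sketch (uuids and values hypothetical, db being the client instance):

# List logical interfaces under one physical interface, then just count them.
ok, pairs = db._cassandra_logical_interface_list(
    parent_uuids=['some-physical-interface-uuid'],
    filters={'field_names': ['display_name'],
             'field_values': ['some-stored-prop-value']})
# pairs -> [(fq_name_list, uuid), ...] ordered by column timestamp

ok, total = db._cassandra_logical_interface_list(
    parent_uuids=['some-physical-interface-uuid'], count=True)
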
-
- def _cassandra_logical_interface_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'logical_interface', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'logical_interface', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('logical_interface', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_logical_interface_delete
-
- def _cassandra_route_table_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_route_table_alloc
-
- def _cassandra_route_table_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('route_table')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'route_table', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('routes', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'routes', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('route_table', fq_name_cols)
-
- return (True, '')
- #end _cassandra_route_table_create
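
For a non config-root child, the create path above derives the parent purely from obj_dict: parent_type (with '-' mapped to '_' for the method-style type name), the fq_name minus its last element as the parent fq_name, then a fq_name_to_uuid lookup before _create_child records the link. A short sketch of that derivation with a hypothetical route table parented by a project:

# Hypothetical obj_dict for a route table whose parent is a project.
obj_dict = {'fq_name': ['default-domain', 'default-project', 'rt-1'],
            'parent_type': 'project'}
parent_method_type = obj_dict['parent_type'].replace('-', '_')   # 'project'
parent_fq_name = obj_dict['fq_name'][:-1]   # ['default-domain', 'default-project']
# parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
# self._create_child(bch, parent_method_type, parent_uuid, 'route_table', uuid)
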
-
- def _cassandra_route_table_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
-        # optimize for the common case of reading non-backref, non-children fields:
-        # ignore columns starting with 'b' and 'c' - this has a significant
-        # performance impact in scaled settings, e.g. a read of a project
- if (field_names is None or
- (set(field_names) & (RouteTable.backref_fields | RouteTable.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_route_table_read
-
- def _cassandra_route_table_count_children(self, obj_uuid, child_type):
-        # if child_type is None, return an error
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in RouteTable.children_fields:
-            return (False, '%s is not a valid child of RouteTable' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_route_table_count_children
-
- def _cassandra_route_table_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'routes' in new_obj_dict:
- new_props['routes'] = new_obj_dict['routes']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from the request dict (or from the db if not in the request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'route_table', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'route_table', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_route_table_update
-
- def _cassandra_route_table_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:route_table:'
- col_fin = 'children:route_table;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
-                # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:route_table:'
- col_fin = 'backref:route_table;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
-                # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('route_table', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_route_table_list
-
- def _cassandra_route_table_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'route_table', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'route_table', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('route_table', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_route_table_delete
-
- def _cassandra_physical_interface_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_physical_interface_alloc
-
- def _cassandra_physical_interface_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('physical_interface')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'physical_interface', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('physical_interface', fq_name_cols)
-
- return (True, '')
- #end _cassandra_physical_interface_create
-
- def _cassandra_physical_interface_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
-        # optimize for the common case of reading non-backref, non-children fields:
-        # ignore columns starting with 'b' and 'c' - this has a significant
-        # performance impact in scaled settings, e.g. a read of a project
- if (field_names is None or
- (set(field_names) & (PhysicalInterface.backref_fields | PhysicalInterface.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
- if 'logical_interfaces' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['logical_interfaces'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['logical_interfaces'] = sorted_children
- [child.pop('tstamp') for child in result['logical_interfaces']]
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_physical_interface_read
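
Unlike the other *_read methods shown here, the physical-interface read post-processes its 'logical_interfaces' children: each per-child dict carries a 'tstamp' key, the list is sorted on that key, and the key is then stripped before the result is returned. In miniature, with the child dicts trimmed to the keys the sort relies on:

from operator import itemgetter

children = [{'uuid': 'li-2', 'tstamp': 20}, {'uuid': 'li-1', 'tstamp': 10}]
children = sorted(children, key=itemgetter('tstamp'))
for child in children:
    child.pop('tstamp')
# children -> [{'uuid': 'li-1'}, {'uuid': 'li-2'}]
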
-
- def _cassandra_physical_interface_count_children(self, obj_uuid, child_type):
-        # if child_type is None, return an error
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in PhysicalInterface.children_fields:
-            return (False, '%s is not a valid child of PhysicalInterface' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_physical_interface_count_children
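
A hedged usage sketch of the count above (uuid hypothetical, db being the client instance): the child_type argument is the plural children-field name, which the method trims with child_type[:-1] to build the 'children:logical_interface:' column prefix before pycassa's get_count does the rest.

ok, n = db._cassandra_physical_interface_count_children(
    'some-physical-interface-uuid', 'logical_interfaces')
if ok:
    print('%d logical interface children' % n)
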
-
- def _cassandra_physical_interface_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from the request dict (or from the db if not in the request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'physical_interface', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'physical_interface', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_physical_interface_update
-
- def _cassandra_physical_interface_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:physical_interface:'
- col_fin = 'children:physical_interface;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
-                # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:physical_interface:'
- col_fin = 'backref:physical_interface;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
-                # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('physical_interface', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_physical_interface_list
-
- def _cassandra_physical_interface_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'physical_interface', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'physical_interface', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('physical_interface', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_physical_interface_delete
-
- def _cassandra_access_control_list_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_access_control_list_alloc
-
- def _cassandra_access_control_list_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('access_control_list')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'access_control_list', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('access_control_list_entries', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'access_control_list_entries', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('access_control_list', fq_name_cols)
-
- return (True, '')
- #end _cassandra_access_control_list_create
-
- def _cassandra_access_control_list_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
-        # optimize for the common case of reading non-backref, non-children fields:
-        # ignore columns starting with 'b' and 'c' - this has a significant
-        # performance impact in scaled settings, e.g. a read of a project
- if (field_names is None or
- (set(field_names) & (AccessControlList.backref_fields | AccessControlList.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_access_control_list_read
-
- def _cassandra_access_control_list_count_children(self, obj_uuid, child_type):
-        # if child_type is None, return an error
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in AccessControlList.children_fields:
-            return (False, '%s is not a valid child of AccessControlList' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_access_control_list_count_children
-
- def _cassandra_access_control_list_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'access_control_list_entries' in new_obj_dict:
- new_props['access_control_list_entries'] = new_obj_dict['access_control_list_entries']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from the request dict (or from the db if not in the request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'access_control_list', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'access_control_list', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_access_control_list_update
-
- def _cassandra_access_control_list_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:access_control_list:'
- col_fin = 'children:access_control_list;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
-                # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:access_control_list:'
- col_fin = 'backref:access_control_list;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
-                # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('access_control_list', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_access_control_list_list
-
- def _cassandra_access_control_list_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'access_control_list', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'access_control_list', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
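- # the fq-name table is keyed by object type and holds one column per object,
- # named '<fq_name_str>:<uuid>'; removing that column de-indexes this object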
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('access_control_list', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_access_control_list_delete
-
- def _cassandra_analytics_node_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_analytics_node_alloc
-
- def _cassandra_analytics_node_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('analytics_node')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'analytics_node', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('analytics_node_ip_address', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'analytics_node_ip_address', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('analytics_node', fq_name_cols)
-
- return (True, '')
- #end _cassandra_analytics_node_create
-
- def _cassandra_analytics_node_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # skipping columns that start with 'b' (backref:) and 'c' (children:) gives a significant
- # performance improvement in scaled settings (e.g. reading a project); column_start='d' below skips them
- if (field_names is None or
- (set(field_names) & (AnalyticsNode.backref_fields | AnalyticsNode.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
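- # child collections are exposed in results/field_names as '<child_type>s'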
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_analytics_node_read
-
- def _cassandra_analytics_node_count_children(self, obj_uuid, child_type):
- # child_type is mandatory; return an error if it is None
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in AnalyticsNode.children_fields:
- return (False, '%s is not a valid child of AnalyticsNode' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_analytics_node_count_children
-
- def _cassandra_analytics_node_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'analytics_node_ip_address' in new_obj_dict:
- new_props['analytics_node_ip_address'] = new_obj_dict['analytics_node_ip_address']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from the request dict (or from the db if absent from the request)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'analytics_node', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'analytics_node', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_analytics_node_update
-
- def _cassandra_analytics_node_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
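- # keep only rows whose 'prop:<field>' columns exactly match every requested
- # (field, value) pair; values are compared against the raw (JSON-encoded) column contents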
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:analytics_node:'
- col_fin = 'children:analytics_node;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:analytics_node:'
- col_fin = 'backref:analytics_node;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('backref:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('analytics_node', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_analytics_node_list
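- # Illustrative only (hypothetical caller, not part of the generated API): listing the
- # analytics-node children of a parent while filtering on a property could look like
- #   ok, pairs = self._cassandra_analytics_node_list(
- #       parent_uuids=[parent_uuid],
- #       filters={'field_names': ['display_name'], 'field_values': [json.dumps('an-1')]})
- # where pairs is a list of (fq_name, uuid) tuples ordered by creation timestamp; since
- # property columns are stored JSON-encoded, filter values must be in that encoded form.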
-
- def _cassandra_analytics_node_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'analytics_node', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'analytics_node', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('analytics_node', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_analytics_node_delete
-
- def _cassandra_virtual_DNS_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_virtual_DNS_alloc
-
- def _cassandra_virtual_DNS_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('virtual_DNS')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'virtual_DNS', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('virtual_DNS_data', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'virtual_DNS_data', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('virtual_DNS', fq_name_cols)
-
- return (True, '')
- #end _cassandra_virtual_DNS_create
-
- def _cassandra_virtual_DNS_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # skipping columns that start with 'b' (backref:) and 'c' (children:) gives a significant
- # performance improvement in scaled settings (e.g. reading a project); column_start='d' below skips them
- if (field_names is None or
- (set(field_names) & (VirtualDns.backref_fields | VirtualDns.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
- if 'virtual_DNS_records' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['virtual_DNS_records'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['virtual_DNS_records'] = sorted_children
- [child.pop('tstamp') for child in result['virtual_DNS_records']]
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_virtual_DNS_read
-
- def _cassandra_virtual_DNS_count_children(self, obj_uuid, child_type):
- # child_type is mandatory; return an error if it is None
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in VirtualDns.children_fields:
- return (False, '%s is not a valid child of VirtualDns' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_virtual_DNS_count_children
-
- def _cassandra_virtual_DNS_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'virtual_DNS_data' in new_obj_dict:
- new_props['virtual_DNS_data'] = new_obj_dict['virtual_DNS_data']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from the request dict (or from the db if absent from the request)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'virtual_DNS', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'virtual_DNS', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_virtual_DNS_update
-
- def _cassandra_virtual_DNS_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:virtual_DNS:'
- col_fin = 'children:virtual_DNS;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:virtual_DNS:'
- col_fin = 'backref:virtual_DNS;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('backref:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('virtual_DNS', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_virtual_DNS_list
-
- def _cassandra_virtual_DNS_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'virtual_DNS', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'virtual_DNS', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('virtual_DNS', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_virtual_DNS_delete
-
- def _cassandra_customer_attachment_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_customer_attachment_alloc
-
- def _cassandra_customer_attachment_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('customer_attachment')
-
- # Properties
- field = obj_dict.get('attachment_address', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'attachment_address', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
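- # each reference is stored as a 'ref:<type>:<uuid>' column on this object's row,
- # with the ref data (attr + is_weakref) as the column value; the target uuid is
- # resolved from the fq_name ('to') supplied in the request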
- refs = obj_dict.get('virtual_machine_interface_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'customer_attachment', obj_ids['uuid'], 'virtual_machine_interface', ref_uuid, ref_data)
- refs = obj_dict.get('floating_ip_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('floating_ip', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'customer_attachment', obj_ids['uuid'], 'floating_ip', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('customer_attachment', fq_name_cols)
-
- return (True, '')
- #end _cassandra_customer_attachment_create
-
- def _cassandra_customer_attachment_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # skipping columns that start with 'b' (backref:) and 'c' (children:) gives a significant
- # performance improvement in scaled settings (e.g. reading a project); column_start='d' below skips them
- if (field_names is None or
- (set(field_names) & (CustomerAttachment.backref_fields | CustomerAttachment.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_customer_attachment_read
-
- def _cassandra_customer_attachment_count_children(self, obj_uuid, child_type):
- # child_type is mandatory; return an error if it is None
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in CustomerAttachment.children_fields:
- return (False, '%s is not a valid child of CustomerAttachment' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_customer_attachment_count_children
-
- def _cassandra_customer_attachment_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'virtual_machine_interface_refs' in new_obj_dict:
- new_ref_infos['virtual_machine_interface'] = {}
- new_refs = new_obj_dict['virtual_machine_interface_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_machine_interface'][new_ref_uuid] = new_ref_data
-
- if 'floating_ip_refs' in new_obj_dict:
- new_ref_infos['floating_ip'] = {}
- new_refs = new_obj_dict['floating_ip_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('floating_ip', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['floating_ip'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'attachment_address' in new_obj_dict:
- new_props['attachment_address'] = new_obj_dict['attachment_address']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from the request dict (or from the db if absent from the request)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'customer_attachment', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'customer_attachment', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_customer_attachment_update
-
- def _cassandra_customer_attachment_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:customer_attachment:'
- col_fin = 'children:customer_attachment;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:customer_attachment:'
- col_fin = 'backref:customer_attachment;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('backref:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('customer_attachment', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_customer_attachment_list
-
- def _cassandra_customer_attachment_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'customer_attachment', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'customer_attachment', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('customer_attachment', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_customer_attachment_delete
-
- def _cassandra_service_appliance_set_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_service_appliance_set_alloc
-
- def _cassandra_service_appliance_set_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('service_appliance_set')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'service_appliance_set', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('service_appliance_set_properties', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'service_appliance_set_properties', field)
-
- field = obj_dict.get('service_appliance_driver', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'service_appliance_driver', field)
-
- field = obj_dict.get('service_appliance_ha_mode', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'service_appliance_ha_mode', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('service_appliance_set', fq_name_cols)
-
- return (True, '')
- #end _cassandra_service_appliance_set_create
-
- def _cassandra_service_appliance_set_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # skipping columns that start with 'b' (backref:) and 'c' (children:) gives a significant
- # performance improvement in scaled settings (e.g. reading a project); column_start='d' below skips them
- if (field_names is None or
- (set(field_names) & (ServiceApplianceSet.backref_fields | ServiceApplianceSet.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
- if 'service_appliances' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['service_appliances'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['service_appliances'] = sorted_children
- [child.pop('tstamp') for child in result['service_appliances']]
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_service_appliance_set_read
-
- def _cassandra_service_appliance_set_count_children(self, obj_uuid, child_type):
- # child_type is mandatory; return an error if it is None
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in ServiceApplianceSet.children_fields:
- return (False, '%s is not a valid child of ServiceApplianceSet' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_service_appliance_set_count_children
-
- def _cassandra_service_appliance_set_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'service_appliance_set_properties' in new_obj_dict:
- new_props['service_appliance_set_properties'] = new_obj_dict['service_appliance_set_properties']
- if 'service_appliance_driver' in new_obj_dict:
- new_props['service_appliance_driver'] = new_obj_dict['service_appliance_driver']
- if 'service_appliance_ha_mode' in new_obj_dict:
- new_props['service_appliance_ha_mode'] = new_obj_dict['service_appliance_ha_mode']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from the request dict (or from the db if absent from the request)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'service_appliance_set', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'service_appliance_set', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_service_appliance_set_update
-
- def _cassandra_service_appliance_set_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:service_appliance_set:'
- col_fin = 'children:service_appliance_set;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
-                # flatten to [('children:<type>:<uuid>', (<val>,<ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:service_appliance_set:'
- col_fin = 'backref:service_appliance_set;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
-                # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('service_appliance_set', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_service_appliance_set_list
-
- def _cassandra_service_appliance_set_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
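-        # Deletion order: detach from the parent's 'children:' column, drop the
-        # object's outgoing refs, remove the object row itself, then clear the
-        # matching entry from the fq-name table.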
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'service_appliance_set', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'service_appliance_set', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('service_appliance_set', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_service_appliance_set_delete
-
- def _cassandra_config_node_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_config_node_alloc
-
- def _cassandra_config_node_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
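-        # Base columns hold the JSON-encoded fq_name and type; when a
-        # parent_type is supplied, _create_child() links this object under its
-        # parent row before the properties are written.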
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('config_node')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'config_node', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('config_node_ip_address', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'config_node_ip_address', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('config_node', fq_name_cols)
-
- return (True, '')
- #end _cassandra_config_node_create
-
- def _cassandra_config_node_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
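-        # Each object row (keyed by uuid) stores typed columns: 'fq_name',
-        # 'type', 'parent:*', 'prop:<name>', 'ref:<type>:<uuid>',
-        # 'children:<type>:<uuid>' and 'backref:<type>:<uuid>'; the loop below
-        # dispatches on these prefixes to assemble the result dict.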
-        # optimize for the common case of reading non-backref, non-children
-        # fields by ignoring columns starting with 'b' and 'c', which has a
-        # significant performance impact in scaled settings, e.g. reading a project
- if (field_names is None or
- (set(field_names) & (ConfigNode.backref_fields | ConfigNode.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_config_node_read
-
- def _cassandra_config_node_count_children(self, obj_uuid, child_type):
- # if child_type = None, return
- if child_type is None:
- return (False, '')
-
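-        # The count is a Cassandra get_count() over the column slice
-        # 'children:' + child_type[:-1] + ':' on this object's row.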
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in ConfigNode.children_fields:
-            return (False, '%s is not a valid child of ConfigNode' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_config_node_count_children
-
- def _cassandra_config_node_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'config_node_ip_address' in new_obj_dict:
- new_props['config_node_ip_address'] = new_obj_dict['config_node_ip_address']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'config_node', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'config_node', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_config_node_update
-
- def _cassandra_config_node_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:config_node:'
- col_fin = 'children:config_node;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
-                # flatten to [('children:<type>:<uuid>', (<val>,<ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:config_node:'
- col_fin = 'backref:config_node;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
-                # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('config_node', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_config_node_list
-
- def _cassandra_config_node_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'config_node', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'config_node', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('config_node', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_config_node_delete
-
- def _cassandra_qos_queue_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_qos_queue_alloc
-
- def _cassandra_qos_queue_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('qos_queue')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'qos_queue', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('min_bandwidth', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'min_bandwidth', field)
-
- field = obj_dict.get('max_bandwidth', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'max_bandwidth', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('qos_queue', fq_name_cols)
-
- return (True, '')
- #end _cassandra_qos_queue_create
-
- def _cassandra_qos_queue_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
-        # optimize for the common case of reading non-backref, non-children
-        # fields by ignoring columns starting with 'b' and 'c', which has a
-        # significant performance impact in scaled settings, e.g. reading a project
- if (field_names is None or
- (set(field_names) & (QosQueue.backref_fields | QosQueue.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_qos_queue_read
-
- def _cassandra_qos_queue_count_children(self, obj_uuid, child_type):
- # if child_type = None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in QosQueue.children_fields:
-            return (False, '%s is not a valid child of QosQueue' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_qos_queue_count_children
-
- def _cassandra_qos_queue_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'min_bandwidth' in new_obj_dict:
- new_props['min_bandwidth'] = new_obj_dict['min_bandwidth']
- if 'max_bandwidth' in new_obj_dict:
- new_props['max_bandwidth'] = new_obj_dict['max_bandwidth']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'qos_queue', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'qos_queue', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_qos_queue_update
-
- def _cassandra_qos_queue_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:qos_queue:'
- col_fin = 'children:qos_queue;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
-                # flatten to [('children:<type>:<uuid>', (<val>,<ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:qos_queue:'
- col_fin = 'backref:qos_queue;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
-                # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('qos_queue', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_qos_queue_list
-
- def _cassandra_qos_queue_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'qos_queue', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'qos_queue', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('qos_queue', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_qos_queue_delete
-
- def _cassandra_virtual_machine_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_virtual_machine_alloc
-
- def _cassandra_virtual_machine_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('virtual_machine')
-
- # Properties
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
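-        # service-instance references from the request are resolved from
-        # fq_name to uuid and stored as weak refs ('is_weakref': True) together
-        # with their optional 'attr' data.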
- refs = obj_dict.get('service_instance_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('service_instance', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': True}
- self._create_ref(bch, 'virtual_machine', obj_ids['uuid'], 'service_instance', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('virtual_machine', fq_name_cols)
-
- return (True, '')
- #end _cassandra_virtual_machine_create
-
- def _cassandra_virtual_machine_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
-        # optimize for the common case of reading non-backref, non-children
-        # fields by ignoring columns starting with 'b' and 'c', which has a
-        # significant performance impact in scaled settings, e.g. reading a project
- if (field_names is None or
- (set(field_names) & (VirtualMachine.backref_fields | VirtualMachine.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
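-            # Child entries carry the column timestamp, so virtual-machine
-            # interfaces can be returned in creation order; the helper 'tstamp'
-            # key is stripped before the result is returned.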
- if 'virtual_machine_interfaces' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['virtual_machine_interfaces'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['virtual_machine_interfaces'] = sorted_children
- [child.pop('tstamp') for child in result['virtual_machine_interfaces']]
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_virtual_machine_read
-
- def _cassandra_virtual_machine_count_children(self, obj_uuid, child_type):
- # if child_type = None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in VirtualMachine.children_fields:
-            return (False, '%s is not a valid child of VirtualMachine' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_virtual_machine_count_children
-
- def _cassandra_virtual_machine_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'service_instance_refs' in new_obj_dict:
- new_ref_infos['service_instance'] = {}
- new_refs = new_obj_dict['service_instance_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('service_instance', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': True}
- new_ref_infos['service_instance'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'virtual_machine', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'virtual_machine', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_virtual_machine_update
-
- def _cassandra_virtual_machine_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:virtual_machine:'
- col_fin = 'children:virtual_machine;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
-                # flatten to [('children:<type>:<uuid>', (<val>,<ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:virtual_machine:'
- col_fin = 'backref:virtual_machine;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
-                # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('virtual_machine', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_virtual_machine_list
-
- def _cassandra_virtual_machine_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'virtual_machine', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'virtual_machine', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('virtual_machine', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_virtual_machine_delete
-
- def _cassandra_interface_route_table_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_interface_route_table_alloc
-
- def _cassandra_interface_route_table_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('interface_route_table')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'interface_route_table', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('interface_route_table_routes', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'interface_route_table_routes', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('interface_route_table', fq_name_cols)
-
- return (True, '')
- #end _cassandra_interface_route_table_create
-
- def _cassandra_interface_route_table_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
-        # optimize for the common case of reading non-backref, non-children
-        # fields by ignoring columns starting with 'b' and 'c', which has a
-        # significant performance impact in scaled settings, e.g. reading a project
- if (field_names is None or
- (set(field_names) & (InterfaceRouteTable.backref_fields | InterfaceRouteTable.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_interface_route_table_read
-
- def _cassandra_interface_route_table_count_children(self, obj_uuid, child_type):
- # if child_type = None, return
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in InterfaceRouteTable.children_fields:
-            return (False, '%s is not a valid child of InterfaceRouteTable' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_interface_route_table_count_children
-
- def _cassandra_interface_route_table_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'interface_route_table_routes' in new_obj_dict:
- new_props['interface_route_table_routes'] = new_obj_dict['interface_route_table_routes']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'interface_route_table', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'interface_route_table', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_interface_route_table_update
-
- def _cassandra_interface_route_table_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
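- # filter_rows: multiget the candidate rows' 'prop:<field>' columns and keep
- # only the rows whose stored column values exactly match every
- # (field, value) pair in filter_params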
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
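- # three ways to anchor the listing: by parent (children:* columns), by
- # back-ref anchor (backref:* columns), or unanchored via the fq-name table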
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:interface_route_table:'
- col_fin = 'children:interface_route_table;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:interface_route_table:'
- col_fin = 'backref:interface_route_table;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('interface_route_table', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_interface_route_table_list
-
- def _cassandra_interface_route_table_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'interface_route_table', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'interface_route_table', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('interface_route_table', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_interface_route_table_delete
-
- def _cassandra_service_template_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_service_template_alloc
-
- def _cassandra_service_template_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('service_template')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'service_template', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('service_template_properties', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'service_template_properties', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('service_template', fq_name_cols)
-
- return (True, '')
- #end _cassandra_service_template_create
-
- def _cassandra_service_template_read(self, obj_uuids, field_names = None):
- # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for the common case of reading non-backref, non-children fields:
- # ignoring columns starting with 'b' and 'c' has a significant performance
- # impact in scaled settings, e.g. when reading a project
- if (field_names is None or
- (set(field_names) & (ServiceTemplate.backref_fields | ServiceTemplate.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
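- # dispatch on the column-name prefix: parent:, prop:, children:, ref: and
- # backref: columns each populate a different part of the result dict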
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_service_template_read
-
- def _cassandra_service_template_count_children(self, obj_uuid, child_type):
- # if child_type is None, there is nothing to count
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in ServiceTemplate.children_fields:
- return (False, '%s is not a valid child of ServiceTemplate' %(child_type))
-
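- # children are stored as 'children:<singular child type>:<uuid>' columns on
- # the parent row; the ':'..';' bounds give a range scan over exactly that
- # prefix (';' is the character after ':'), so get_count returns the number
- # of such children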
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_service_template_count_children
-
- def _cassandra_service_template_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'service_template_properties' in new_obj_dict:
- new_props['service_template_properties'] = new_obj_dict['service_template_properties']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from the request dict (or from the db if not in the request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'service_template', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'service_template', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_service_template_update
-
- def _cassandra_service_template_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:service_template:'
- col_fin = 'children:service_template;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:service_template:'
- col_fin = 'backref:service_template;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('service_template', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_service_template_list
-
- def _cassandra_service_template_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'service_template', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'service_template', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('service_template', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_service_template_delete
-
- def _cassandra_virtual_ip_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_virtual_ip_alloc
-
- def _cassandra_virtual_ip_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
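- # object row is keyed by uuid; fq_name/type/parent_type live as plain
- # columns, properties as 'prop:<name>' and references as 'ref:<type>:<uuid>'
- # columns, with a reverse entry added to the fq-name index table at the end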
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('virtual_ip')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'virtual_ip', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('virtual_ip_properties', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'virtual_ip_properties', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('loadbalancer_pool_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('loadbalancer_pool', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'virtual_ip', obj_ids['uuid'], 'loadbalancer_pool', ref_uuid, ref_data)
- refs = obj_dict.get('virtual_machine_interface_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'virtual_ip', obj_ids['uuid'], 'virtual_machine_interface', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('virtual_ip', fq_name_cols)
-
- return (True, '')
- #end _cassandra_virtual_ip_create
-
- def _cassandra_virtual_ip_read(self, obj_uuids, field_names = None):
- # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for the common case of reading non-backref, non-children fields:
- # ignoring columns starting with 'b' and 'c' has a significant performance
- # impact in scaled settings, e.g. when reading a project
- if (field_names is None or
- (set(field_names) & (VirtualIp.backref_fields | VirtualIp.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_virtual_ip_read
-
- def _cassandra_virtual_ip_count_children(self, obj_uuid, child_type):
- # if child_type is None, there is nothing to count
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in VirtualIp.children_fields:
- return (False, '%s is not a valid child of VirtualIp' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_virtual_ip_count_children
-
- def _cassandra_virtual_ip_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
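- # new_ref_infos maps ref type -> {ref uuid: {'attr': ..., 'is_weakref': False}};
- # existing ref: columns are passed through _update_ref below and whatever
- # remains in new_ref_infos afterwards is written out with _create_ref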
- if 'loadbalancer_pool_refs' in new_obj_dict:
- new_ref_infos['loadbalancer_pool'] = {}
- new_refs = new_obj_dict['loadbalancer_pool_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('loadbalancer_pool', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['loadbalancer_pool'][new_ref_uuid] = new_ref_data
-
- if 'virtual_machine_interface_refs' in new_obj_dict:
- new_ref_infos['virtual_machine_interface'] = {}
- new_refs = new_obj_dict['virtual_machine_interface_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_machine_interface'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'virtual_ip_properties' in new_obj_dict:
- new_props['virtual_ip_properties'] = new_obj_dict['virtual_ip_properties']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from the request dict (or from the db if not in the request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'virtual_ip', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'virtual_ip', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_virtual_ip_update
-
- def _cassandra_virtual_ip_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:virtual_ip:'
- col_fin = 'children:virtual_ip;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
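- # child columns were fetched with include_timestamp=True; the timestamp is
- # kept per child so the listing can be sorted by it when sort=True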
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:virtual_ip:'
- col_fin = 'backref:virtual_ip;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('virtual_ip', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_virtual_ip_list
-
- def _cassandra_virtual_ip_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'virtual_ip', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'virtual_ip', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('virtual_ip', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_virtual_ip_delete
-
- def _cassandra_loadbalancer_member_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_loadbalancer_member_alloc
-
- def _cassandra_loadbalancer_member_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('loadbalancer_member')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'loadbalancer_member', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('loadbalancer_member_properties', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'loadbalancer_member_properties', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('loadbalancer_member', fq_name_cols)
-
- return (True, '')
- #end _cassandra_loadbalancer_member_create
-
- def _cassandra_loadbalancer_member_read(self, obj_uuids, field_names = None):
- # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for the common case of reading non-backref, non-children fields:
- # ignoring columns starting with 'b' and 'c' has a significant performance
- # impact in scaled settings, e.g. when reading a project
- if (field_names is None or
- (set(field_names) & (LoadbalancerMember.backref_fields | LoadbalancerMember.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_loadbalancer_member_read
-
- def _cassandra_loadbalancer_member_count_children(self, obj_uuid, child_type):
- # if child_type is None, there is nothing to count
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in LoadbalancerMember.children_fields:
- return (False, '%s is not a valid child of LoadbalancerMember' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_loadbalancer_member_count_children
-
- def _cassandra_loadbalancer_member_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'loadbalancer_member_properties' in new_obj_dict:
- new_props['loadbalancer_member_properties'] = new_obj_dict['loadbalancer_member_properties']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from the request dict (or from the db if not in the request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'loadbalancer_member', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'loadbalancer_member', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_loadbalancer_member_update
-
- def _cassandra_loadbalancer_member_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:loadbalancer_member:'
- col_fin = 'children:loadbalancer_member;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:loadbalancer_member:'
- col_fin = 'backref:loadbalancer_member;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('loadbalancer_member', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_loadbalancer_member_list
-
- def _cassandra_loadbalancer_member_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'loadbalancer_member', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'loadbalancer_member', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('loadbalancer_member', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_loadbalancer_member_delete
-
- def _cassandra_security_group_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_security_group_alloc
-
- def _cassandra_security_group_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('security_group')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'security_group', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('security_group_id', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'security_group_id', field)
-
- field = obj_dict.get('configured_security_group_id', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'configured_security_group_id', field)
-
- field = obj_dict.get('security_group_entries', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'security_group_entries', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('security_group', fq_name_cols)
-
- return (True, '')
- #end _cassandra_security_group_create
-
- def _cassandra_security_group_read(self, obj_uuids, field_names = None):
- # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for the common case of reading non-backref, non-children fields:
- # ignoring columns starting with 'b' and 'c' has a significant performance
- # impact in scaled settings, e.g. when reading a project
- if (field_names is None or
- (set(field_names) & (SecurityGroup.backref_fields | SecurityGroup.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
- if 'access_control_lists' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['access_control_lists'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['access_control_lists'] = sorted_children
- [child.pop('tstamp') for child in result['access_control_lists']]
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_security_group_read
-
- def _cassandra_security_group_count_children(self, obj_uuid, child_type):
- # if child_type is None, there is nothing to count
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in SecurityGroup.children_fields:
-            return (False, '%s is not a valid child of SecurityGroup' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_security_group_count_children
-
- def _cassandra_security_group_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'security_group_id' in new_obj_dict:
- new_props['security_group_id'] = new_obj_dict['security_group_id']
- if 'configured_security_group_id' in new_obj_dict:
- new_props['configured_security_group_id'] = new_obj_dict['configured_security_group_id']
- if 'security_group_entries' in new_obj_dict:
- new_props['security_group_entries'] = new_obj_dict['security_group_entries']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
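-        # walk the object's existing columns: 'prop:*' values are refreshed
-        # from the request (id_perms always, to bump the last-modified
-        # timestamp) and 'ref:*' links are reconciled against new_ref_infos;
-        # all mutations are queued on the batch and sent once at the end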
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'security_group', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'security_group', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_security_group_update
-
- def _cassandra_security_group_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
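-        # listing is anchored one of three ways: by parent (via 'children:*'
-        # columns), by referring object (via 'backref:*' columns), or
-        # unanchored via the fq_name table; 'prop:*' filters apply in each case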
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:security_group:'
- col_fin = 'children:security_group;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
-                # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:security_group:'
- col_fin = 'backref:security_group;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
-                # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('security_group', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_security_group_list
-
- def _cassandra_security_group_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'security_group', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'security_group', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('security_group', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_security_group_delete
-
- def _cassandra_provider_attachment_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_provider_attachment_alloc
-
- def _cassandra_provider_attachment_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('provider_attachment')
-
- # Properties
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('virtual_router_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_router', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'provider_attachment', obj_ids['uuid'], 'virtual_router', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('provider_attachment', fq_name_cols)
-
- return (True, '')
- #end _cassandra_provider_attachment_create
-
- def _cassandra_provider_attachment_read(self, obj_uuids, field_names = None):
-        # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # ignoring columns starting from 'b' and 'c' - significant performance
- # impact in scaled setting. e.g. read of project
- if (field_names is None or
- (set(field_names) & (ProviderAttachment.backref_fields | ProviderAttachment.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_provider_attachment_read
-
- def _cassandra_provider_attachment_count_children(self, obj_uuid, child_type):
-        # if child_type is None, return immediately
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in ProviderAttachment.children_fields:
-            return (False, '%s is not a valid child of ProviderAttachment' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_provider_attachment_count_children
-
- def _cassandra_provider_attachment_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'virtual_router_refs' in new_obj_dict:
- new_ref_infos['virtual_router'] = {}
- new_refs = new_obj_dict['virtual_router_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_router', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_router'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'provider_attachment', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'provider_attachment', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_provider_attachment_update
-
- def _cassandra_provider_attachment_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:provider_attachment:'
- col_fin = 'children:provider_attachment;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
-                # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:provider_attachment:'
- col_fin = 'backref:provider_attachment;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
-                # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('provider_attachment', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_provider_attachment_list
-
- def _cassandra_provider_attachment_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'provider_attachment', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'provider_attachment', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('provider_attachment', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_provider_attachment_delete
-
- def _cassandra_virtual_machine_interface_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_virtual_machine_interface_alloc
-
- def _cassandra_virtual_machine_interface_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('virtual_machine_interface')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
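-            # parent_type may be hyphenated; the fq_name-to-uuid lookup and
-            # the child link below use the underscored form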
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'virtual_machine_interface', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('virtual_machine_interface_mac_addresses', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'virtual_machine_interface_mac_addresses', field)
-
- field = obj_dict.get('virtual_machine_interface_dhcp_option_list', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'virtual_machine_interface_dhcp_option_list', field)
-
- field = obj_dict.get('virtual_machine_interface_host_routes', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'virtual_machine_interface_host_routes', field)
-
- field = obj_dict.get('virtual_machine_interface_allowed_address_pairs', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'virtual_machine_interface_allowed_address_pairs', field)
-
- field = obj_dict.get('vrf_assign_table', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'vrf_assign_table', field)
-
- field = obj_dict.get('virtual_machine_interface_device_owner', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'virtual_machine_interface_device_owner', field)
-
- field = obj_dict.get('virtual_machine_interface_properties', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'virtual_machine_interface_properties', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('qos_forwarding_class_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('qos_forwarding_class', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'virtual_machine_interface', obj_ids['uuid'], 'qos_forwarding_class', ref_uuid, ref_data)
- refs = obj_dict.get('security_group_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('security_group', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'virtual_machine_interface', obj_ids['uuid'], 'security_group', ref_uuid, ref_data)
- refs = obj_dict.get('virtual_machine_interface_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'virtual_machine_interface', obj_ids['uuid'], 'virtual_machine_interface', ref_uuid, ref_data)
- refs = obj_dict.get('virtual_machine_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_machine', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'virtual_machine_interface', obj_ids['uuid'], 'virtual_machine', ref_uuid, ref_data)
- refs = obj_dict.get('virtual_network_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_network', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'virtual_machine_interface', obj_ids['uuid'], 'virtual_network', ref_uuid, ref_data)
- refs = obj_dict.get('routing_instance_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('routing_instance', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'virtual_machine_interface', obj_ids['uuid'], 'routing_instance', ref_uuid, ref_data)
- refs = obj_dict.get('interface_route_table_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('interface_route_table', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'virtual_machine_interface', obj_ids['uuid'], 'interface_route_table', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('virtual_machine_interface', fq_name_cols)
-
- return (True, '')
- #end _cassandra_virtual_machine_interface_create
-
- def _cassandra_virtual_machine_interface_read(self, obj_uuids, field_names = None):
-        # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # ignoring columns starting from 'b' and 'c' - significant performance
- # impact in scaled setting. e.g. read of project
- if (field_names is None or
- (set(field_names) & (VirtualMachineInterface.backref_fields | VirtualMachineInterface.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_virtual_machine_interface_read
-
- def _cassandra_virtual_machine_interface_count_children(self, obj_uuid, child_type):
-        # if child_type is None, return immediately
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in VirtualMachineInterface.children_fields:
-            return (False, '%s is not a valid child of VirtualMachineInterface' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_virtual_machine_interface_count_children
-
- def _cassandra_virtual_machine_interface_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'qos_forwarding_class_refs' in new_obj_dict:
- new_ref_infos['qos_forwarding_class'] = {}
- new_refs = new_obj_dict['qos_forwarding_class_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('qos_forwarding_class', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['qos_forwarding_class'][new_ref_uuid] = new_ref_data
-
- if 'security_group_refs' in new_obj_dict:
- new_ref_infos['security_group'] = {}
- new_refs = new_obj_dict['security_group_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('security_group', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['security_group'][new_ref_uuid] = new_ref_data
-
- if 'virtual_machine_interface_refs' in new_obj_dict:
- new_ref_infos['virtual_machine_interface'] = {}
- new_refs = new_obj_dict['virtual_machine_interface_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_machine_interface'][new_ref_uuid] = new_ref_data
-
- if 'virtual_machine_refs' in new_obj_dict:
- new_ref_infos['virtual_machine'] = {}
- new_refs = new_obj_dict['virtual_machine_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_machine', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_machine'][new_ref_uuid] = new_ref_data
-
- if 'virtual_network_refs' in new_obj_dict:
- new_ref_infos['virtual_network'] = {}
- new_refs = new_obj_dict['virtual_network_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_network', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_network'][new_ref_uuid] = new_ref_data
-
- if 'routing_instance_refs' in new_obj_dict:
- new_ref_infos['routing_instance'] = {}
- new_refs = new_obj_dict['routing_instance_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('routing_instance', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['routing_instance'][new_ref_uuid] = new_ref_data
-
- if 'interface_route_table_refs' in new_obj_dict:
- new_ref_infos['interface_route_table'] = {}
- new_refs = new_obj_dict['interface_route_table_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('interface_route_table', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['interface_route_table'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'virtual_machine_interface_mac_addresses' in new_obj_dict:
- new_props['virtual_machine_interface_mac_addresses'] = new_obj_dict['virtual_machine_interface_mac_addresses']
- if 'virtual_machine_interface_dhcp_option_list' in new_obj_dict:
- new_props['virtual_machine_interface_dhcp_option_list'] = new_obj_dict['virtual_machine_interface_dhcp_option_list']
- if 'virtual_machine_interface_host_routes' in new_obj_dict:
- new_props['virtual_machine_interface_host_routes'] = new_obj_dict['virtual_machine_interface_host_routes']
- if 'virtual_machine_interface_allowed_address_pairs' in new_obj_dict:
- new_props['virtual_machine_interface_allowed_address_pairs'] = new_obj_dict['virtual_machine_interface_allowed_address_pairs']
- if 'vrf_assign_table' in new_obj_dict:
- new_props['vrf_assign_table'] = new_obj_dict['vrf_assign_table']
- if 'virtual_machine_interface_device_owner' in new_obj_dict:
- new_props['virtual_machine_interface_device_owner'] = new_obj_dict['virtual_machine_interface_device_owner']
- if 'virtual_machine_interface_properties' in new_obj_dict:
- new_props['virtual_machine_interface_properties'] = new_obj_dict['virtual_machine_interface_properties']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'virtual_machine_interface', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'virtual_machine_interface', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_virtual_machine_interface_update
-
- def _cassandra_virtual_machine_interface_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:virtual_machine_interface:'
- col_fin = 'children:virtual_machine_interface;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
-                # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:virtual_machine_interface:'
- col_fin = 'backref:virtual_machine_interface;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
-                # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('virtual_machine_interface', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_virtual_machine_interface_list
-
- def _cassandra_virtual_machine_interface_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'virtual_machine_interface', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'virtual_machine_interface', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('virtual_machine_interface', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_virtual_machine_interface_delete
-
- def _cassandra_loadbalancer_healthmonitor_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_loadbalancer_healthmonitor_alloc
-
- def _cassandra_loadbalancer_healthmonitor_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('loadbalancer_healthmonitor')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'loadbalancer_healthmonitor', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('loadbalancer_healthmonitor_properties', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'loadbalancer_healthmonitor_properties', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('loadbalancer_healthmonitor', fq_name_cols)
-
- return (True, '')
- #end _cassandra_loadbalancer_healthmonitor_create
-
- def _cassandra_loadbalancer_healthmonitor_read(self, obj_uuids, field_names = None):
-        # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # ignoring columns starting from 'b' and 'c' - significant performance
- # impact in scaled setting. e.g. read of project
- if (field_names is None or
- (set(field_names) & (LoadbalancerHealthmonitor.backref_fields | LoadbalancerHealthmonitor.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_loadbalancer_healthmonitor_read
-
- def _cassandra_loadbalancer_healthmonitor_count_children(self, obj_uuid, child_type):
-        # if child_type is None, return immediately
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in LoadbalancerHealthmonitor.children_fields:
-            return (False, '%s is not a valid child of LoadbalancerHealthmonitor' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_loadbalancer_healthmonitor_count_children
-
- def _cassandra_loadbalancer_healthmonitor_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'loadbalancer_healthmonitor_properties' in new_obj_dict:
- new_props['loadbalancer_healthmonitor_properties'] = new_obj_dict['loadbalancer_healthmonitor_properties']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'loadbalancer_healthmonitor', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'loadbalancer_healthmonitor', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_loadbalancer_healthmonitor_update
-
- def _cassandra_loadbalancer_healthmonitor_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:loadbalancer_healthmonitor:'
- col_fin = 'children:loadbalancer_healthmonitor;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
-                # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:loadbalancer_healthmonitor:'
- col_fin = 'backref:loadbalancer_healthmonitor;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('loadbalancer_healthmonitor', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_loadbalancer_healthmonitor_list
-
- def _cassandra_loadbalancer_healthmonitor_delete(self, obj_uuid):
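- # Delete the loadbalancer_healthmonitor object: unlink it from its parent,
- # drop its refs, remove the uuid row and clear its fq-name table entry.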
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'loadbalancer_healthmonitor', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'loadbalancer_healthmonitor', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('loadbalancer_healthmonitor', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_loadbalancer_healthmonitor_delete
-
- def _cassandra_virtual_network_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_virtual_network_alloc
-
- def _cassandra_virtual_network_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('virtual_network')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'virtual_network', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('virtual_network_properties', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'virtual_network_properties', field)
-
- field = obj_dict.get('virtual_network_network_id', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'virtual_network_network_id', field)
-
- field = obj_dict.get('route_target_list', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'route_target_list', field)
-
- field = obj_dict.get('router_external', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'router_external', field)
-
- field = obj_dict.get('is_shared', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'is_shared', field)
-
- field = obj_dict.get('external_ipam', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'external_ipam', field)
-
- field = obj_dict.get('flood_unknown_unicast', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'flood_unknown_unicast', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('qos_forwarding_class_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('qos_forwarding_class', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'virtual_network', obj_ids['uuid'], 'qos_forwarding_class', ref_uuid, ref_data)
- refs = obj_dict.get('network_ipam_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('network_ipam', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'virtual_network', obj_ids['uuid'], 'network_ipam', ref_uuid, ref_data)
- refs = obj_dict.get('network_policy_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('network_policy', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'virtual_network', obj_ids['uuid'], 'network_policy', ref_uuid, ref_data)
- refs = obj_dict.get('route_table_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('route_table', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'virtual_network', obj_ids['uuid'], 'route_table', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('virtual_network', fq_name_cols)
-
- return (True, '')
- #end _cassandra_virtual_network_create
-
- def _cassandra_virtual_network_read(self, obj_uuids, field_names = None):
- # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # ignoring columns starting with 'b' and 'c' - significant performance
- # impact in a scaled setting, e.g. read of project
- if (field_names is None or
- (set(field_names) & (VirtualNetwork.backref_fields | VirtualNetwork.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
- if 'access_control_lists' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['access_control_lists'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['access_control_lists'] = sorted_children
- [child.pop('tstamp') for child in result['access_control_lists']]
-
- if 'floating_ip_pools' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['floating_ip_pools'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['floating_ip_pools'] = sorted_children
- [child.pop('tstamp') for child in result['floating_ip_pools']]
-
- if 'routing_instances' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['routing_instances'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['routing_instances'] = sorted_children
- [child.pop('tstamp') for child in result['routing_instances']]
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_virtual_network_read
-
- def _cassandra_virtual_network_count_children(self, obj_uuid, child_type):
- # if no child_type is given, return failure
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in VirtualNetwork.children_fields:
- return (False, '%s is not a valid child of VirtualNetwork' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_virtual_network_count_children
-
- def _cassandra_virtual_network_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'qos_forwarding_class_refs' in new_obj_dict:
- new_ref_infos['qos_forwarding_class'] = {}
- new_refs = new_obj_dict['qos_forwarding_class_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('qos_forwarding_class', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['qos_forwarding_class'][new_ref_uuid] = new_ref_data
-
- if 'network_ipam_refs' in new_obj_dict:
- new_ref_infos['network_ipam'] = {}
- new_refs = new_obj_dict['network_ipam_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('network_ipam', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['network_ipam'][new_ref_uuid] = new_ref_data
-
- if 'network_policy_refs' in new_obj_dict:
- new_ref_infos['network_policy'] = {}
- new_refs = new_obj_dict['network_policy_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('network_policy', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['network_policy'][new_ref_uuid] = new_ref_data
-
- if 'route_table_refs' in new_obj_dict:
- new_ref_infos['route_table'] = {}
- new_refs = new_obj_dict['route_table_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('route_table', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['route_table'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'virtual_network_properties' in new_obj_dict:
- new_props['virtual_network_properties'] = new_obj_dict['virtual_network_properties']
- if 'virtual_network_network_id' in new_obj_dict:
- new_props['virtual_network_network_id'] = new_obj_dict['virtual_network_network_id']
- if 'route_target_list' in new_obj_dict:
- new_props['route_target_list'] = new_obj_dict['route_target_list']
- if 'router_external' in new_obj_dict:
- new_props['router_external'] = new_obj_dict['router_external']
- if 'is_shared' in new_obj_dict:
- new_props['is_shared'] = new_obj_dict['is_shared']
- if 'external_ipam' in new_obj_dict:
- new_props['external_ipam'] = new_obj_dict['external_ipam']
- if 'flood_unknown_unicast' in new_obj_dict:
- new_props['flood_unknown_unicast'] = new_obj_dict['flood_unknown_unicast']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'virtual_network', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'virtual_network', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_virtual_network_update
-
- def _cassandra_virtual_network_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
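- # List (fq_name, uuid) pairs of virtual_network objects anchored by
- # parent_uuids, back_ref_uuids or obj_uuids (else all of this type); optional
- # prop filters apply, and count=True returns only the number of matches.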
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:virtual_network:'
- col_fin = 'children:virtual_network;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:virtual_network:'
- col_fin = 'backref:virtual_network;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('virtual_network', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_virtual_network_list
-
- def _cassandra_virtual_network_delete(self, obj_uuid):
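- # Delete the virtual_network object: unlink it from its parent, drop its
- # refs, remove the uuid row and clear its fq-name table entry.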
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'virtual_network', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'virtual_network', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('virtual_network', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_virtual_network_delete
-
- def _cassandra_project_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_project_alloc
-
- def _cassandra_project_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('project')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'project', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('quota', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'quota', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('namespace_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('namespace', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'project', obj_ids['uuid'], 'namespace', ref_uuid, ref_data)
- refs = obj_dict.get('floating_ip_pool_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('floating_ip_pool', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'project', obj_ids['uuid'], 'floating_ip_pool', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('project', fq_name_cols)
-
- return (True, '')
- #end _cassandra_project_create
-
- def _cassandra_project_read(self, obj_uuids, field_names = None):
- # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # ignoring columns starting with 'b' and 'c' - significant performance
- # impact in a scaled setting, e.g. read of project
- if (field_names is None or
- (set(field_names) & (Project.backref_fields | Project.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
- if 'security_groups' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['security_groups'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['security_groups'] = sorted_children
- [child.pop('tstamp') for child in result['security_groups']]
-
- if 'virtual_networks' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['virtual_networks'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['virtual_networks'] = sorted_children
- [child.pop('tstamp') for child in result['virtual_networks']]
-
- if 'qos_queues' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['qos_queues'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['qos_queues'] = sorted_children
- [child.pop('tstamp') for child in result['qos_queues']]
-
- if 'qos_forwarding_classs' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['qos_forwarding_classs'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['qos_forwarding_classs'] = sorted_children
- [child.pop('tstamp') for child in result['qos_forwarding_classs']]
-
- if 'network_ipams' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['network_ipams'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['network_ipams'] = sorted_children
- [child.pop('tstamp') for child in result['network_ipams']]
-
- if 'network_policys' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['network_policys'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['network_policys'] = sorted_children
- [child.pop('tstamp') for child in result['network_policys']]
-
- if 'virtual_machine_interfaces' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['virtual_machine_interfaces'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['virtual_machine_interfaces'] = sorted_children
- [child.pop('tstamp') for child in result['virtual_machine_interfaces']]
-
- if 'service_instances' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['service_instances'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['service_instances'] = sorted_children
- [child.pop('tstamp') for child in result['service_instances']]
-
- if 'route_tables' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['route_tables'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['route_tables'] = sorted_children
- [child.pop('tstamp') for child in result['route_tables']]
-
- if 'interface_route_tables' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['interface_route_tables'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['interface_route_tables'] = sorted_children
- [child.pop('tstamp') for child in result['interface_route_tables']]
-
- if 'logical_routers' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['logical_routers'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['logical_routers'] = sorted_children
- [child.pop('tstamp') for child in result['logical_routers']]
-
- if 'loadbalancer_pools' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['loadbalancer_pools'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['loadbalancer_pools'] = sorted_children
- [child.pop('tstamp') for child in result['loadbalancer_pools']]
-
- if 'loadbalancer_healthmonitors' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['loadbalancer_healthmonitors'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['loadbalancer_healthmonitors'] = sorted_children
- [child.pop('tstamp') for child in result['loadbalancer_healthmonitors']]
-
- if 'virtual_ips' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['virtual_ips'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['virtual_ips'] = sorted_children
- [child.pop('tstamp') for child in result['virtual_ips']]
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_project_read
-
- def _cassandra_project_count_children(self, obj_uuid, child_type):
- # if no child_type is given, return failure
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in Project.children_fields:
- return (False, '%s is not a valid child of Project' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_project_count_children
-
- def _cassandra_project_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'namespace_refs' in new_obj_dict:
- new_ref_infos['namespace'] = {}
- new_refs = new_obj_dict['namespace_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('namespace', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['namespace'][new_ref_uuid] = new_ref_data
-
- if 'floating_ip_pool_refs' in new_obj_dict:
- new_ref_infos['floating_ip_pool'] = {}
- new_refs = new_obj_dict['floating_ip_pool_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('floating_ip_pool', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['floating_ip_pool'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'quota' in new_obj_dict:
- new_props['quota'] = new_obj_dict['quota']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'project', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'project', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_project_update
-
- def _cassandra_project_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
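- # List (fq_name, uuid) pairs of project objects anchored by parent_uuids,
- # back_ref_uuids or obj_uuids (else all of this type); optional prop filters
- # apply, and count=True returns only the number of matches.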
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:project:'
- col_fin = 'children:project;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:project:'
- col_fin = 'backref:project;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('project', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_project_list
-
- def _cassandra_project_delete(self, obj_uuid):
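- # Delete the project object: unlink it from its parent, drop its refs,
- # remove the uuid row and clear its fq-name table entry.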
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'project', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'project', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('project', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_project_delete
-
- def _cassandra_qos_forwarding_class_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_qos_forwarding_class_alloc
-
- def _cassandra_qos_forwarding_class_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('qos_forwarding_class')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'qos_forwarding_class', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('dscp', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'dscp', field)
-
- field = obj_dict.get('trusted', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'trusted', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('qos_queue_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('qos_queue', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'qos_forwarding_class', obj_ids['uuid'], 'qos_queue', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('qos_forwarding_class', fq_name_cols)
-
- return (True, '')
- #end _cassandra_qos_forwarding_class_create
-
- def _cassandra_qos_forwarding_class_read(self, obj_uuids, field_names = None):
- # if field_names is None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
- # ignoring columns starting with 'b' and 'c' - significant performance
- # impact in a scaled setting, e.g. read of project
- if (field_names is None or
- (set(field_names) & (QosForwardingClass.backref_fields | QosForwardingClass.children_fields))):
- # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_qos_forwarding_class_read
-
- def _cassandra_qos_forwarding_class_count_children(self, obj_uuid, child_type):
- # if no child_type is given, return failure
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in QosForwardingClass.children_fields:
- return (False, '%s is not a valid child of QosForwardingClass' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_qos_forwarding_class_count_children
-
- def _cassandra_qos_forwarding_class_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'qos_queue_refs' in new_obj_dict:
- new_ref_infos['qos_queue'] = {}
- new_refs = new_obj_dict['qos_queue_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('qos_queue', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['qos_queue'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'dscp' in new_obj_dict:
- new_props['dscp'] = new_obj_dict['dscp']
- if 'trusted' in new_obj_dict:
- new_props['trusted'] = new_obj_dict['trusted']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'qos_forwarding_class', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'qos_forwarding_class', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_qos_forwarding_class_update
-
- def _cassandra_qos_forwarding_class_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
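- # List (fq_name, uuid) pairs of qos_forwarding_class objects anchored by
- # parent_uuids, back_ref_uuids or obj_uuids (else all of this type); optional
- # prop filters apply, and count=True returns only the number of matches.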
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:qos_forwarding_class:'
- col_fin = 'children:qos_forwarding_class;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
- # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:qos_forwarding_class:'
- col_fin = 'backref:qos_forwarding_class;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('qos_forwarding_class', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_qos_forwarding_class_list
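
The generated *_list methods above all share one filtering step: candidate rows are collected from the parent anchor, the backref anchor, or the fq-name table, and filter_rows() then re-reads only the requested 'prop:<field>' columns and keeps the rows whose stored values match every filter. A minimal, self-contained sketch of that match step; the rows dict and its values are hypothetical stand-ins for what obj_uuid_cf.multiget() would return, with JSON-encoded property values:

    def match_filters(rows, filter_fields):
        # Keep only rows whose stored 'prop:<name>' value equals every filter value.
        kept = {}
        for row_key, cols in rows.items():
            if all(cols.get('prop:%s' % fname) == fval for fname, fval in filter_fields):
                kept[row_key] = cols
        return kept

    rows = {
        'uuid-1': {'prop:display_name': '"gold"'},
        'uuid-2': {'prop:display_name': '"bronze"'},
    }
    print(match_filters(rows, [('display_name', '"gold"')]))   # only uuid-1 survives
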
-
- def _cassandra_qos_forwarding_class_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'qos_forwarding_class', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'qos_forwarding_class', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('qos_forwarding_class', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_qos_forwarding_class_delete
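
A note on the ':' / ';' pairs used above for col_start/col_fin (and throughout the delete and list methods): ';' is the ASCII code point immediately after ':', so a column slice bounded by 'ref:' and 'ref;' covers exactly the columns whose names begin with 'ref:', assuming column names compare as plain strings (as a UTF8 column comparator orders them); no real column is named literally 'ref;', so the finish bound being inclusive does not matter. A small sketch with made-up column names:

    cols = ['fq_name', 'parent:project:1111', 'prop:id_perms',
            'ref:route_target:2222', 'ref:virtual_network:3333', 'type']
    start, finish = 'ref:', 'ref;'          # ';' == chr(ord(':') + 1)
    print([c for c in sorted(cols) if start <= c <= finish])
    # ['ref:route_target:2222', 'ref:virtual_network:3333']
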
-
- def _cassandra_database_node_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_database_node_alloc
-
- def _cassandra_database_node_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('database_node')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'database_node', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('database_node_ip_address', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'database_node_ip_address', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('database_node', fq_name_cols)
-
- return (True, '')
- #end _cassandra_database_node_create
-
- def _cassandra_database_node_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
-        # ignoring columns that start with 'b' (backref) and 'c' (children) gives a
-        # significant performance win in scaled settings, e.g. when reading a project
- if (field_names is None or
- (set(field_names) & (DatabaseNode.backref_fields | DatabaseNode.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_database_node_read
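
The read fast path above relies on column-name ordering: when no backref or children fields are requested, starting the slice at 'd' skips every 'backref:*' and 'children:*' column (both sort before 'd'), while the columns the read still needs ('fq_name', 'parent:*', 'parent_type', 'prop:*', 'ref:*', 'type') all sort after it. A sketch with hypothetical column names:

    cols = ['backref:virtual_router:aaaa', 'children:config_node:bbbb', 'fq_name',
            'parent:global_system_config:cccc', 'parent_type',
            'prop:database_node_ip_address', 'ref:node:dddd', 'type']
    print([c for c in cols if c >= 'd'])
    # ['fq_name', 'parent:global_system_config:cccc', 'parent_type',
    #  'prop:database_node_ip_address', 'ref:node:dddd', 'type']
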
-
- def _cassandra_database_node_count_children(self, obj_uuid, child_type):
-        # if child_type is None, return an error
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in DatabaseNode.children_fields:
-            return (False, '%s is not a valid child of DatabaseNode' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_database_node_count_children
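
The count_children methods build their column range from the plural field name: children_fields holds plural names while the stored columns use the singular type ('children:<type>:<uuid>'), so the trailing 's' is stripped before forming the start/finish bounds. With an illustrative field name:

    child_type = 'config_nodes'                      # illustrative plural field name
    col_start = 'children:' + child_type[:-1] + ':'  # 'children:config_node:'
    col_finish = 'children:' + child_type[:-1] + ';' # 'children:config_node;'
    print(col_start, col_finish)
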
-
- def _cassandra_database_node_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- new_props = {}
- if 'database_node_ip_address' in new_obj_dict:
- new_props['database_node_ip_address'] = new_obj_dict['database_node_ip_address']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from the request dict (or from the db if not in the request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'database_node', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'database_node', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_database_node_update
-
- def _cassandra_database_node_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:database_node:'
- col_fin = 'children:database_node;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
-                # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:database_node:'
- col_fin = 'backref:database_node;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
-                # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('database_node', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_database_node_list
-
- def _cassandra_database_node_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'database_node', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'database_node', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('database_node', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_database_node_delete
-
- def _cassandra_routing_instance_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_routing_instance_alloc
-
- def _cassandra_routing_instance_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('routing_instance')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'routing_instance', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('service_chain_information', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'service_chain_information', field)
-
- field = obj_dict.get('routing_instance_is_default', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'routing_instance_is_default', field)
-
- field = obj_dict.get('static_route_entries', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'static_route_entries', field)
-
- field = obj_dict.get('default_ce_protocol', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'default_ce_protocol', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('routing_instance_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('routing_instance', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'routing_instance', obj_ids['uuid'], 'routing_instance', ref_uuid, ref_data)
- refs = obj_dict.get('route_target_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('route_target', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'routing_instance', obj_ids['uuid'], 'route_target', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('routing_instance', fq_name_cols)
-
- return (True, '')
- #end _cassandra_routing_instance_create
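
The fq-name index written at the end of every create method above uses the object type as the row key and one column per object: the ':'-joined fq_name followed by the uuid, with a JSON null as a placeholder value (the real code additionally passes the joined name through utils.encode_string). The matching delete methods remove exactly that one column again. With illustrative values:

    import json

    fq_name = ['default-domain', 'default-project', 'ri-blue']   # illustrative
    uuid = 'aaaa-bbbb-cccc'                                       # illustrative
    fq_name_cols = {':'.join(fq_name) + ':' + uuid: json.dumps(None)}
    print(fq_name_cols)
    # {'default-domain:default-project:ri-blue:aaaa-bbbb-cccc': 'null'}
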
-
- def _cassandra_routing_instance_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
-        # ignoring columns that start with 'b' (backref) and 'c' (children) gives a
-        # significant performance win in scaled settings, e.g. when reading a project
- if (field_names is None or
- (set(field_names) & (RoutingInstance.backref_fields | RoutingInstance.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
- if 'bgp_routers' in result:
- # sort children; TODO do this based on schema
- sorted_children = sorted(result['bgp_routers'], key = itemgetter('tstamp'))
- # re-write result's children without timestamp
- result['bgp_routers'] = sorted_children
- [child.pop('tstamp') for child in result['bgp_routers']]
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_routing_instance_read
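
The child post-processing above for 'bgp_routers' keeps a 'tstamp' entry per collected child only long enough to order the list, then strips it from the returned dicts. A minimal sketch with made-up data:

    from operator import itemgetter

    children = [{'uuid': 'b', 'tstamp': 20}, {'uuid': 'a', 'tstamp': 10}]
    children = sorted(children, key=itemgetter('tstamp'))
    for child in children:
        child.pop('tstamp')
    print(children)   # [{'uuid': 'a'}, {'uuid': 'b'}]
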
-
- def _cassandra_routing_instance_count_children(self, obj_uuid, child_type):
-        # if child_type is None, return an error
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in RoutingInstance.children_fields:
-            return (False, '%s is not a valid child of RoutingInstance' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_routing_instance_count_children
-
- def _cassandra_routing_instance_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'routing_instance_refs' in new_obj_dict:
- new_ref_infos['routing_instance'] = {}
- new_refs = new_obj_dict['routing_instance_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('routing_instance', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['routing_instance'][new_ref_uuid] = new_ref_data
-
- if 'route_target_refs' in new_obj_dict:
- new_ref_infos['route_target'] = {}
- new_refs = new_obj_dict['route_target_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('route_target', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['route_target'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'service_chain_information' in new_obj_dict:
- new_props['service_chain_information'] = new_obj_dict['service_chain_information']
- if 'routing_instance_is_default' in new_obj_dict:
- new_props['routing_instance_is_default'] = new_obj_dict['routing_instance_is_default']
- if 'static_route_entries' in new_obj_dict:
- new_props['static_route_entries'] = new_obj_dict['static_route_entries']
- if 'default_ce_protocol' in new_obj_dict:
- new_props['default_ce_protocol'] = new_obj_dict['default_ce_protocol']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from the request dict (or from the db if not in the request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'routing_instance', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'routing_instance', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_routing_instance_update
-
- def _cassandra_routing_instance_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:routing_instance:'
- col_fin = 'children:routing_instance;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
-                # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:routing_instance:'
- col_fin = 'backref:routing_instance;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
-                # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('routing_instance', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_routing_instance_list
-
- def _cassandra_routing_instance_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'routing_instance', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'routing_instance', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('routing_instance', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_routing_instance_delete
-
- def _cassandra_network_ipam_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_network_ipam_alloc
-
- def _cassandra_network_ipam_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('network_ipam')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'network_ipam', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('network_ipam_mgmt', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'network_ipam_mgmt', field)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('virtual_DNS_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_DNS', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'network_ipam', obj_ids['uuid'], 'virtual_DNS', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('network_ipam', fq_name_cols)
-
- return (True, '')
- #end _cassandra_network_ipam_create
-
- def _cassandra_network_ipam_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
-        # ignoring columns that start with 'b' (backref) and 'c' (children) gives a
-        # significant performance win in scaled settings, e.g. when reading a project
- if (field_names is None or
- (set(field_names) & (NetworkIpam.backref_fields | NetworkIpam.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_network_ipam_read
-
- def _cassandra_network_ipam_count_children(self, obj_uuid, child_type):
-        # if child_type is None, return an error
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in NetworkIpam.children_fields:
-            return (False, '%s is not a valid child of NetworkIpam' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_network_ipam_count_children
-
- def _cassandra_network_ipam_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'virtual_DNS_refs' in new_obj_dict:
- new_ref_infos['virtual_DNS'] = {}
- new_refs = new_obj_dict['virtual_DNS_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_DNS', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_DNS'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'network_ipam_mgmt' in new_obj_dict:
- new_props['network_ipam_mgmt'] = new_obj_dict['network_ipam_mgmt']
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
-                    # get it from the request dict (or from the db if not in the request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'network_ipam', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'network_ipam', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_network_ipam_update
-
- def _cassandra_network_ipam_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:network_ipam:'
- col_fin = 'children:network_ipam;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
-                # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:network_ipam:'
- col_fin = 'backref:network_ipam;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>,<ts>), *]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('network_ipam', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_network_ipam_list
-
- def _cassandra_network_ipam_delete(self, obj_uuid):
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'network_ipam', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'network_ipam', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('network_ipam', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_network_ipam_delete
-
- def _cassandra_logical_router_alloc(self, fq_name):
- return (True, '')
- #end _cassandra_logical_router_alloc
-
- def _cassandra_logical_router_create(self, obj_ids, obj_dict):
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- bch = self._obj_uuid_cf.batch()
-
- obj_cols = {}
- obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
- obj_cols['type'] = json.dumps('logical_router')
- if 'parent_type' in obj_dict:
- # non config-root child
- parent_type = obj_dict['parent_type']
- parent_method_type = parent_type.replace('-', '_')
- parent_fq_name = obj_dict['fq_name'][:-1]
- obj_cols['parent_type'] = json.dumps(parent_type)
- parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
- self._create_child(bch, parent_method_type, parent_uuid, 'logical_router', obj_ids['uuid'])
-
- # Properties
- field = obj_dict.get('id_perms', None)
- if field is not None:
- field['created'] = datetime.datetime.utcnow().isoformat()
- field['last_modified'] = field['created']
- self._create_prop(bch, obj_ids['uuid'], 'id_perms', field)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- self._create_prop(bch, obj_ids['uuid'], 'display_name', field)
-
-
- # References
- refs = obj_dict.get('virtual_machine_interface_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'logical_router', obj_ids['uuid'], 'virtual_machine_interface', ref_uuid, ref_data)
- refs = obj_dict.get('route_target_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('route_target', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'logical_router', obj_ids['uuid'], 'route_target', ref_uuid, ref_data)
- refs = obj_dict.get('virtual_network_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('virtual_network', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'logical_router', obj_ids['uuid'], 'virtual_network', ref_uuid, ref_data)
- refs = obj_dict.get('service_instance_refs', [])
- for ref in refs:
- ref_uuid = self.fq_name_to_uuid('service_instance', ref['to'])
- ref_attr = ref.get('attr', None)
- ref_data = {'attr': ref_attr, 'is_weakref': False}
- self._create_ref(bch, 'logical_router', obj_ids['uuid'], 'service_instance', ref_uuid, ref_data)
-
- bch.insert(obj_ids['uuid'], obj_cols)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(obj_dict['fq_name'])
- fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
- self._obj_fq_name_cf.insert('logical_router', fq_name_cols)
-
- return (True, '')
- #end _cassandra_logical_router_create
-
- def _cassandra_logical_router_read(self, obj_uuids, field_names = None):
- # if field_names = None, all fields will be read/returned
-
- obj_uuid_cf = self._obj_uuid_cf
-
- # optimize for common case of reading non-backref, non-children fields
-        # ignoring columns that start with 'b' (backref) and 'c' (children) gives a
-        # significant performance win in scaled settings, e.g. when reading a project
- if (field_names is None or
- (set(field_names) & (LogicalRouter.backref_fields | LogicalRouter.children_fields))):
-            # at least one backref/children field is needed
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_count = 10000000,
- include_timestamp = True)
- else: # ignore reading backref + children columns
- obj_rows = obj_uuid_cf.multiget(obj_uuids,
- column_start = 'd',
- column_count = 10000000,
- include_timestamp = True)
-
- if (len(obj_uuids) == 1) and not obj_rows:
- raise cfgm_common.exceptions.NoIdError(obj_uuids[0])
-
- results = []
- for row_key in obj_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- obj_uuid = row_key
- obj_cols = obj_rows[obj_uuid]
- result = {}
- result['uuid'] = obj_uuid
- result['fq_name'] = json.loads(obj_cols['fq_name'][0])
- for col_name in obj_cols.keys():
- if self._re_match_parent.match(col_name):
- # non config-root child
- (_, _, parent_uuid) = col_name.split(':')
- parent_type = json.loads(obj_cols['parent_type'][0])
- result['parent_type'] = parent_type
- try:
- result['parent_uuid'] = parent_uuid
- result['parent_href'] = self._generate_url(parent_type, parent_uuid)
- except cfgm_common.exceptions.NoIdError:
- err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
- return (False, err_msg)
-
- # TODO use compiled RE
- if self._re_match_prop.match(col_name):
- (_, prop_name) = col_name.split(':')
- result[prop_name] = json.loads(obj_cols[col_name][0])
-
- # TODO use compiled RE
- if self._re_match_children.match(col_name):
- (_, child_type, child_uuid) = col_name.split(':')
- if field_names and '%ss' %(child_type) not in field_names:
- continue
-
- child_tstamp = obj_cols[col_name][1]
- try:
- self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # TODO use compiled RE
- if self._re_match_ref.match(col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
-
- if self._re_match_backref.match(col_name):
- (_, back_ref_type, back_ref_uuid) = col_name.split(':')
- if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
- continue
-
- try:
- self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
- obj_cols[col_name][0])
- except cfgm_common.exceptions.NoIdError:
- continue
-
- # for all column names
-
-
- results.append(result)
- # end for all rows
-
- return (True, results)
- #end _cassandra_logical_router_read
-
- def _cassandra_logical_router_count_children(self, obj_uuid, child_type):
-        # if child_type is None, return an error
- if child_type is None:
- return (False, '')
-
- obj_uuid_cf = self._obj_uuid_cf
- if child_type not in LogicalRouter.children_fields:
-            return (False, '%s is not a valid child of LogicalRouter' %(child_type))
-
- col_start = 'children:'+child_type[:-1]+':'
- col_finish = 'children:'+child_type[:-1]+';'
- num_children = obj_uuid_cf.get_count(obj_uuid,
- column_start = col_start,
- column_finish = col_finish,
- max_count = 10000000)
- return (True, num_children)
- #end _cassandra_logical_router_count_children
-
- def _cassandra_logical_router_update(self, obj_uuid, new_obj_dict):
- # Grab ref-uuids and properties in new version
- new_ref_infos = {}
-
- if 'virtual_machine_interface_refs' in new_obj_dict:
- new_ref_infos['virtual_machine_interface'] = {}
- new_refs = new_obj_dict['virtual_machine_interface_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_machine_interface', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_machine_interface'][new_ref_uuid] = new_ref_data
-
- if 'route_target_refs' in new_obj_dict:
- new_ref_infos['route_target'] = {}
- new_refs = new_obj_dict['route_target_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('route_target', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['route_target'][new_ref_uuid] = new_ref_data
-
- if 'virtual_network_refs' in new_obj_dict:
- new_ref_infos['virtual_network'] = {}
- new_refs = new_obj_dict['virtual_network_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('virtual_network', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['virtual_network'][new_ref_uuid] = new_ref_data
-
- if 'service_instance_refs' in new_obj_dict:
- new_ref_infos['service_instance'] = {}
- new_refs = new_obj_dict['service_instance_refs']
- if new_refs:
- for new_ref in new_refs:
- new_ref_uuid = self.fq_name_to_uuid('service_instance', new_ref['to'])
- new_ref_attr = new_ref.get('attr', None)
- new_ref_data = {'attr': new_ref_attr, 'is_weakref': False}
- new_ref_infos['service_instance'][new_ref_uuid] = new_ref_data
-
- new_props = {}
- if 'id_perms' in new_obj_dict:
- new_props['id_perms'] = new_obj_dict['id_perms']
- if 'display_name' in new_obj_dict:
- new_props['display_name'] = new_obj_dict['display_name']
- # Gather column values for obj and updates to backrefs
- # in a batch and write it at the end
- obj_uuid_cf = self._obj_uuid_cf
- obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
- # TODO optimize this (converts tuple to dict)
- obj_cols = {}
- for col_info in obj_cols_iter:
- obj_cols[col_info[0]] = col_info[1]
-
- bch = obj_uuid_cf.batch()
- for col_name in obj_cols.keys():
- # TODO use compiled RE
- if re.match('prop:', col_name):
- (_, prop_name) = col_name.split(':')
- if prop_name == 'id_perms':
- # id-perms always has to be updated for last-mod timestamp
- # get it from request dict (or from db if not in request dict)
- new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
- self.update_last_modified(bch, obj_uuid, new_id_perms)
- elif prop_name in new_obj_dict:
- self._update_prop(bch, obj_uuid, prop_name, new_props)
-
- # TODO use compiled RE
- if re.match('ref:', col_name):
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._update_ref(bch, 'logical_router', obj_uuid, ref_type, ref_uuid, new_ref_infos)
- # for all column names
-
- # create new refs
- for ref_type in new_ref_infos.keys():
- for ref_uuid in new_ref_infos[ref_type].keys():
- ref_data = new_ref_infos[ref_type][ref_uuid]
- self._create_ref(bch, 'logical_router', obj_uuid, ref_type, ref_uuid, ref_data)
-
- # create new props
- for prop_name in new_props.keys():
- self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
-
- bch.send()
-
- return (True, '')
- #end _cassandra_logical_router_update
-
- def _cassandra_logical_router_list(self, parent_uuids=None, back_ref_uuids=None,
- obj_uuids=None, count=False, filters=None):
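- # List logical-routers three ways: anchored on parent_uuids (walk 'children:' columns),
- # anchored on back_ref_uuids (walk 'backref:' columns), or unanchored (explicit obj_uuids,
- # else the whole 'logical_router' row of the fq-name table). Optional property filters are
- # applied in each mode, and count=True returns only the number of matches.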
- children_fq_names_uuids = []
- if filters:
- fnames = filters.get('field_names', [])
- fvalues = filters.get('field_values', [])
- filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
- else:
- filter_fields = []
-
- def filter_rows(coll_infos, filter_cols, filter_params):
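- # Fetch the requested 'prop:' columns for every candidate row in one multiget and
- # keep only the rows whose stored value matches every (field-name, field-value) pair.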
- filt_infos = {}
- coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
- columns=filter_cols,
- column_count=10000000)
- for row in coll_rows:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- full_match = True
- for fname, fval in filter_params:
- if coll_rows[row]['prop:%s' %(fname)] != fval:
- full_match = False
- break
- if full_match:
- filt_infos[row] = coll_infos[row]
- return filt_infos
- # end filter_rows
-
- def get_fq_name_uuid_list(obj_uuids):
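- # Resolve each uuid to its fq_name, silently skipping uuids that no longer exist.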
- ret_list = []
- for obj_uuid in obj_uuids:
- try:
- obj_fq_name = self.uuid_to_fq_name(obj_uuid)
- ret_list.append((obj_fq_name, obj_uuid))
- except cfgm_common.exceptions.NoIdError:
- pass
- return ret_list
- # end get_fq_name_uuid_list
-
- if parent_uuids:
- # go from parent to child
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'children:logical_router:'
- col_fin = 'children:logical_router;'
- try:
- obj_rows = obj_uuid_cf.multiget(parent_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_parent_anchor(sort=False):
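- # Collect child uuids (with column timestamps) from the parents' 'children:' columns,
- # apply the property filters, optionally sort by timestamp, and return the matching
- # (fq_name, uuid) pairs.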
- # flatten to [('children:<type>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_child_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- child_uuid = col_name.split(':')[2]
- if obj_uuids and child_uuid not in obj_uuids:
- continue
- all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_child_infos = all_child_infos
-
- if not sort:
- ret_child_infos = filt_child_infos.values()
- else:
- ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
-
- return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
- # end filter_rows_parent_anchor
-
- if count:
- return (True, len(filter_rows_parent_anchor()))
-
- children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
-
- if back_ref_uuids:
- # go from anchor to backrefs
- obj_uuid_cf = self._obj_uuid_cf
- col_start = 'backref:logical_router:'
- col_fin = 'backref:logical_router;'
- try:
- obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
- column_start = col_start,
- column_finish = col_fin,
- column_count = 10000000,
- include_timestamp = True)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_backref_anchor():
- # flatten to [('<fqnstr>:<uuid>', (<val>, <ts>)), ...]
- all_cols = [cols for obj_key in obj_rows.keys() for cols in obj_rows[obj_key].items()]
- all_backref_infos = {}
- for col_name, col_val_ts in all_cols:
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = col_name.split(':')
- fq_name = col_name_arr[:-1]
- obj_uuid = col_name_arr[-1]
- if obj_uuids and obj_uuid not in obj_uuids:
- continue
- all_backref_infos[obj_uuid] = \
- {'uuid': obj_uuid, 'fq_name': fq_name, 'tstamp': col_val_ts[1]}
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_backref_infos = filter_rows(all_backref_infos, filter_cols, filter_fields)
- else: # no filter specified
- filt_backref_infos = all_backref_infos
-
- return [(br_info['fq_name'], br_info['uuid']) for br_info in filt_backref_infos.values()]
- # end filter_rows_backref_anchor
-
- if count:
- return (True, len(filter_rows_backref_anchor()))
-
- children_fq_names_uuids = filter_rows_backref_anchor()
-
- if not parent_uuids and not back_ref_uuids:
- obj_uuid_cf = self._obj_uuid_cf
- if obj_uuids:
- # exact objects specified
- def filter_rows_object_list():
- all_obj_infos = {}
- for obj_uuid in obj_uuids:
- all_obj_infos[obj_uuid] = None
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return get_fq_name_uuid_list(filt_obj_infos.keys())
- # end filter_rows_object_list
-
- if count:
- return (True, len(filter_rows_object_list()))
- children_fq_names_uuids = filter_rows_object_list()
-
- else: # grab all resources of this type
- obj_fq_name_cf = self._obj_fq_name_cf
- try:
- cols = obj_fq_name_cf.get('logical_router', column_count = 10000000)
- except pycassa.NotFoundException:
- if count:
- return (True, 0)
- else:
- return (True, children_fq_names_uuids)
-
- def filter_rows_no_anchor():
- all_obj_infos = {}
- for col_name, col_val in cols.items():
- # give chance for zk heartbeat/ping
- gevent.sleep(0)
- col_name_arr = utils.decode_string(col_name).split(':')
- obj_uuid = col_name_arr[-1]
- all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
-
- filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
- if filter_cols:
- filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
- else: # no filters specified
- filt_obj_infos = all_obj_infos
-
- return filt_obj_infos.values()
- # end filter_rows_no_anchor
-
- if count:
- return (True, len(filter_rows_no_anchor()))
-
- children_fq_names_uuids = filter_rows_no_anchor()
-
- return (True, children_fq_names_uuids)
- #end _cassandra_logical_router_list
-
- def _cassandra_logical_router_delete(self, obj_uuid):
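- # Delete order: unlink from the parent, drop all outgoing refs, remove the object's
- # row in a single batch, then clean up the fq-name index entry.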
- obj_uuid_cf = self._obj_uuid_cf
- fq_name = json.loads(obj_uuid_cf.get(obj_uuid, columns = ['fq_name'])['fq_name'])
- bch = obj_uuid_cf.batch()
-
- # unlink from parent
- col_start = 'parent:'
- col_fin = 'parent;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, parent_type, parent_uuid) = col_name.split(':')
- self._delete_child(bch, parent_type, parent_uuid, 'logical_router', obj_uuid)
-
- # remove refs
- col_start = 'ref:'
- col_fin = 'ref;'
- col_name_iter = obj_uuid_cf.xget(obj_uuid, column_start = col_start, column_finish = col_fin)
- for (col_name, col_val) in col_name_iter:
- (_, ref_type, ref_uuid) = col_name.split(':')
- self._delete_ref(bch, 'logical_router', obj_uuid, ref_type, ref_uuid)
-
- bch.remove(obj_uuid)
- bch.send()
-
- # Update fqname table
- fq_name_str = ':'.join(fq_name)
- fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
- self._obj_fq_name_cf.remove('logical_router', columns = [fq_name_col])
-
-
- return (True, '')
- #end _cassandra_logical_router_delete
-
diff --git a/Testcases/vnc_api/gen/vnc_cassandra_client_gen.pyc b/Testcases/vnc_api/gen/vnc_cassandra_client_gen.pyc
deleted file mode 100644
index fb5b140..0000000
--- a/Testcases/vnc_api/gen/vnc_cassandra_client_gen.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/gen/vnc_ifmap_client_gen.py b/Testcases/vnc_api/gen/vnc_ifmap_client_gen.py
deleted file mode 100644
index c07130d..0000000
--- a/Testcases/vnc_api/gen/vnc_ifmap_client_gen.py
+++ /dev/null
@@ -1,10533 +0,0 @@
-
-# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit!
-
-import re
-import json
-import cStringIO
-from lxml import etree
-
-from cfgm_common.ifmap.client import client, namespaces
-from cfgm_common.ifmap.request import NewSessionRequest, RenewSessionRequest
-from cfgm_common.ifmap.request import EndSessionRequest, PublishRequest
-from cfgm_common.ifmap.request import SearchRequest, SubscribeRequest, PurgeRequest, PollRequest
-from cfgm_common.ifmap.id import IPAddress, MACAddress, Device, AccessRequest, Identity, CustomIdentity
-from cfgm_common.ifmap.operations import PublishUpdateOperation, PublishNotifyOperation
-from cfgm_common.ifmap.operations import PublishDeleteOperation, SubscribeUpdateOperation, SubscribeDeleteOperation
-from cfgm_common.ifmap.util import attr, link_ids
-from cfgm_common.ifmap.response import Response, newSessionResult
-from cfgm_common.ifmap.metadata import Metadata
-
-import cfgm_common.imid
-import cfgm_common.exceptions
-from cfgm_common.imid import escape
-from resource_xsd import *
-
-class VncIfmapClientGen(object):
- def __init__(self):
- self._parent_metas = {}
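- # _parent_metas: parent object type -> child object type -> name of the IF-MAP
- # link metadata that connects the parent and child identities.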
- self._parent_metas['domain'] = {}
- self._parent_metas['domain']['project'] = 'domain-project'
- self._parent_metas['domain']['namespace'] = 'domain-namespace'
- self._parent_metas['domain']['service-template'] = 'domain-service-template'
- self._parent_metas['domain']['virtual-DNS'] = 'domain-virtual-DNS'
- self._parent_metas['global-vrouter-config'] = {}
- self._parent_metas['instance-ip'] = {}
- self._parent_metas['network-policy'] = {}
- self._parent_metas['loadbalancer-pool'] = {}
- self._parent_metas['loadbalancer-pool']['loadbalancer-member'] = 'loadbalancer-pool-loadbalancer-member'
- self._parent_metas['virtual-DNS-record'] = {}
- self._parent_metas['route-target'] = {}
- self._parent_metas['floating-ip'] = {}
- self._parent_metas['floating-ip-pool'] = {}
- self._parent_metas['floating-ip-pool']['floating-ip'] = 'floating-ip-pool-floating-ip'
- self._parent_metas['physical-router'] = {}
- self._parent_metas['physical-router']['physical-interface'] = 'physical-router-physical-interface'
- self._parent_metas['physical-router']['logical-interface'] = 'physical-router-logical-interface'
- self._parent_metas['bgp-router'] = {}
- self._parent_metas['virtual-router'] = {}
- self._parent_metas['config-root'] = {}
- self._parent_metas['config-root']['global-system-config'] = 'config-root-global-system-config'
- self._parent_metas['config-root']['domain'] = 'config-root-domain'
- self._parent_metas['subnet'] = {}
- self._parent_metas['global-system-config'] = {}
- self._parent_metas['global-system-config']['global-vrouter-config'] = 'global-system-config-global-vrouter-config'
- self._parent_metas['global-system-config']['physical-router'] = 'global-system-config-physical-router'
- self._parent_metas['global-system-config']['virtual-router'] = 'global-system-config-virtual-router'
- self._parent_metas['global-system-config']['config-node'] = 'global-system-config-config-node'
- self._parent_metas['global-system-config']['analytics-node'] = 'global-system-config-analytics-node'
- self._parent_metas['global-system-config']['database-node'] = 'global-system-config-database-node'
- self._parent_metas['global-system-config']['service-appliance-set'] = 'global-system-config-service-appliance-set'
- self._parent_metas['service-appliance'] = {}
- self._parent_metas['service-instance'] = {}
- self._parent_metas['namespace'] = {}
- self._parent_metas['logical-interface'] = {}
- self._parent_metas['route-table'] = {}
- self._parent_metas['physical-interface'] = {}
- self._parent_metas['physical-interface']['logical-interface'] = 'physical-interface-logical-interface'
- self._parent_metas['access-control-list'] = {}
- self._parent_metas['analytics-node'] = {}
- self._parent_metas['virtual-DNS'] = {}
- self._parent_metas['virtual-DNS']['virtual-DNS-record'] = 'virtual-DNS-virtual-DNS-record'
- self._parent_metas['customer-attachment'] = {}
- self._parent_metas['service-appliance-set'] = {}
- self._parent_metas['service-appliance-set']['service-appliance'] = 'service-appliance-set-service-appliance'
- self._parent_metas['config-node'] = {}
- self._parent_metas['qos-queue'] = {}
- self._parent_metas['virtual-machine'] = {}
- self._parent_metas['virtual-machine']['virtual-machine-interface'] = 'virtual-machine-virtual-machine-interface'
- self._parent_metas['interface-route-table'] = {}
- self._parent_metas['service-template'] = {}
- self._parent_metas['virtual-ip'] = {}
- self._parent_metas['loadbalancer-member'] = {}
- self._parent_metas['security-group'] = {}
- self._parent_metas['security-group']['access-control-list'] = 'security-group-access-control-list'
- self._parent_metas['provider-attachment'] = {}
- self._parent_metas['virtual-machine-interface'] = {}
- self._parent_metas['loadbalancer-healthmonitor'] = {}
- self._parent_metas['virtual-network'] = {}
- self._parent_metas['virtual-network']['access-control-list'] = 'virtual-network-access-control-list'
- self._parent_metas['virtual-network']['floating-ip-pool'] = 'virtual-network-floating-ip-pool'
- self._parent_metas['virtual-network']['routing-instance'] = 'virtual-network-routing-instance'
- self._parent_metas['project'] = {}
- self._parent_metas['project']['security-group'] = 'project-security-group'
- self._parent_metas['project']['virtual-network'] = 'project-virtual-network'
- self._parent_metas['project']['qos-queue'] = 'project-qos-queue'
- self._parent_metas['project']['qos-forwarding-class'] = 'project-qos-forwarding-class'
- self._parent_metas['project']['network-ipam'] = 'project-network-ipam'
- self._parent_metas['project']['network-policy'] = 'project-network-policy'
- self._parent_metas['project']['virtual-machine-interface'] = 'project-virtual-machine-interface'
- self._parent_metas['project']['service-instance'] = 'project-service-instance'
- self._parent_metas['project']['route-table'] = 'project-route-table'
- self._parent_metas['project']['interface-route-table'] = 'project-interface-route-table'
- self._parent_metas['project']['logical-router'] = 'project-logical-router'
- self._parent_metas['project']['loadbalancer-pool'] = 'project-loadbalancer-pool'
- self._parent_metas['project']['loadbalancer-healthmonitor'] = 'project-loadbalancer-healthmonitor'
- self._parent_metas['project']['virtual-ip'] = 'project-virtual-ip'
- self._parent_metas['qos-forwarding-class'] = {}
- self._parent_metas['database-node'] = {}
- self._parent_metas['routing-instance'] = {}
- self._parent_metas['routing-instance']['bgp-router'] = 'instance-bgp-router'
- self._parent_metas['network-ipam'] = {}
- self._parent_metas['logical-router'] = {}
- #end __init__
-
- def _ifmap_domain_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.domain_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_domain_alloc
-
- def _ifmap_domain_set(self, my_imid, existing_metas, obj_dict):
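- # Serialize each property present in obj_dict to IF-MAP metadata, publish it only if
- # it differs from what is already in existing_metas, then publish link metadata for
- # every reference in obj_dict.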
- # Properties Meta
- update = {}
- field = obj_dict.get('domain_limits', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['domain_limits']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- DomainLimitsType(**field).exportChildren(buf, level = 1, name_ = 'domain-limits', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'domain-limits', pretty_print = False)
- domain_limits_xml = buf.getvalue()
- buf.close()
- meta = Metadata('domain-limits' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = domain_limits_xml)
-
- if (existing_metas and 'domain-limits' in existing_metas and
- str(existing_metas['domain-limits'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('api_access_list', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['api_access_list']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- ApiAccessListType(**field).exportChildren(buf, level = 1, name_ = 'api-access-list', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'api-access-list', pretty_print = False)
- api_access_list_xml = buf.getvalue()
- buf.close()
- meta = Metadata('api-access-list' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = api_access_list_xml)
-
- if (existing_metas and 'api-access-list' in existing_metas and
- str(existing_metas['api-access-list'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('project_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'project'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- domain_project_xml = ''
- meta = Metadata('domain-project' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = domain_project_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('namespace_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'namespace'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- domain_namespace_xml = ''
- meta = Metadata('domain-namespace' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = domain_namespace_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('service_template_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'service-template'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- domain_service_template_xml = ''
- meta = Metadata('domain-service-template' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = domain_service_template_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_DNS_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-DNS'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- domain_virtual_DNS_xml = ''
- meta = Metadata('domain-virtual-DNS' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = domain_virtual_DNS_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_domain_set
-
- def _ifmap_domain_create(self, obj_ids, obj_dict):
- if not 'parent_type' in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['domain']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_domain_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_domain_create
-
-
- def _ifmap_domain_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'domain-limits', u'api-access-list', u'id-perms', u'display-name', u'domain-project', u'domain-namespace', u'domain-service-template', u'domain-virtual-DNS']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_domain_read_to_meta_index
-
- def _ifmap_domain_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_domain_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['domain-limits', 'api-access-list', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
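- # refs is empty for domain, so the stale-ref cleanup loop below does nothing.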
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_domain_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_domain_update
-
- def _ifmap_domain_delete(self, obj_ids):
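- # Collect every (referred-identity, metadata-name) pair plus the parent link, delete
- # them as one list, then remove all property metadata on the identity itself.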
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_domain_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_domain_delete
-
- def _ifmap_global_vrouter_config_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.global_vrouter_config_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_global_vrouter_config_alloc
-
- def _ifmap_global_vrouter_config_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('linklocal_services', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['linklocal_services']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- LinklocalServicesTypes(**field).exportChildren(buf, level = 1, name_ = 'linklocal-services', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'linklocal-services', pretty_print = False)
- linklocal_services_xml = buf.getvalue()
- buf.close()
- meta = Metadata('linklocal-services' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = linklocal_services_xml)
-
- if (existing_metas and 'linklocal-services' in existing_metas and
- str(existing_metas['linklocal-services'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('encapsulation_priorities', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['encapsulation_priorities']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- EncapsulationPrioritiesType(**field).exportChildren(buf, level = 1, name_ = 'encapsulation-priorities', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'encapsulation-priorities', pretty_print = False)
- encapsulation_priorities_xml = buf.getvalue()
- buf.close()
- meta = Metadata('encapsulation-priorities' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = encapsulation_priorities_xml)
-
- if (existing_metas and 'encapsulation-priorities' in existing_metas and
- str(existing_metas['encapsulation-priorities'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('vxlan_network_identifier_mode', None)
- if field is not None:
- norm_str = escape(str(obj_dict['vxlan_network_identifier_mode']))
- meta = Metadata('vxlan-network-identifier-mode', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'vxlan-network-identifier-mode' in existing_metas and
- str(existing_metas['vxlan-network-identifier-mode'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_global_vrouter_config_set
-
- def _ifmap_global_vrouter_config_create(self, obj_ids, obj_dict):
- if not 'parent_type' in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['global-vrouter-config']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_global_vrouter_config_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_global_vrouter_config_create
-
-
- def _ifmap_global_vrouter_config_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'linklocal-services', u'encapsulation-priorities', u'vxlan-network-identifier-mode', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_global_vrouter_config_read_to_meta_index
-
- def _ifmap_global_vrouter_config_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_global_vrouter_config_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['linklocal-services', 'encapsulation-priorities', 'vxlan-network-identifier-mode', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_global_vrouter_config_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_global_vrouter_config_update
-
- def _ifmap_global_vrouter_config_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_global_vrouter_config_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_global_vrouter_config_delete
-
- def _ifmap_instance_ip_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.instance_ip_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_instance_ip_alloc
-
- def _ifmap_instance_ip_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('instance_ip_address', None)
- if field is not None:
- norm_str = escape(str(obj_dict['instance_ip_address']))
- meta = Metadata('instance-ip-address', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'instance-ip-address' in existing_metas and
- str(existing_metas['instance-ip-address'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('instance_ip_family', None)
- if field is not None:
- norm_str = escape(str(obj_dict['instance_ip_family']))
- meta = Metadata('instance-ip-family', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'instance-ip-family' in existing_metas and
- str(existing_metas['instance-ip-family'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('instance_ip_mode', None)
- if field is not None:
- norm_str = escape(str(obj_dict['instance_ip_mode']))
- meta = Metadata('instance-ip-mode', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'instance-ip-mode' in existing_metas and
- str(existing_metas['instance-ip-mode'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('subnet_uuid', None)
- if field is not None:
- norm_str = escape(str(obj_dict['subnet_uuid']))
- meta = Metadata('subnet-uuid', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'subnet-uuid' in existing_metas and
- str(existing_metas['subnet-uuid'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_network_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-network'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- instance_ip_virtual_network_xml = ''
- meta = Metadata('instance-ip-virtual-network' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = instance_ip_virtual_network_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_machine_interface_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-machine-interface'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- instance_ip_virtual_machine_interface_xml = ''
- meta = Metadata('instance-ip-virtual-machine-interface' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = instance_ip_virtual_machine_interface_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_instance_ip_set
-
- def _ifmap_instance_ip_create(self, obj_ids, obj_dict):
- (ok, result) = self._ifmap_instance_ip_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_instance_ip_create
-
-
- def _ifmap_instance_ip_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'instance-ip-virtual-network', u'instance-ip-virtual-machine-interface', u'instance-ip-address', u'instance-ip-family', u'instance-ip-mode', u'subnet-uuid', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_instance_ip_read_to_meta_index
-
- def _ifmap_instance_ip_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_instance_ip_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['instance-ip-address', 'instance-ip-family', 'instance-ip-mode', 'subnet-uuid', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'instance-ip-virtual-network': 'virtual-network',
- 'instance-ip-virtual-machine-interface': 'virtual-machine-interface'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_instance_ip_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_instance_ip_update
-
- def _ifmap_instance_ip_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_instance_ip_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_instance_ip_delete
-
- def _ifmap_network_policy_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.network_policy_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_network_policy_alloc
-
- def _ifmap_network_policy_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('network_policy_entries', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['network_policy_entries']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- PolicyEntriesType(**field).exportChildren(buf, level = 1, name_ = 'network-policy-entries', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'network-policy-entries', pretty_print = False)
- network_policy_entries_xml = buf.getvalue()
- buf.close()
- meta = Metadata('network-policy-entries' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = network_policy_entries_xml)
-
- if (existing_metas and 'network-policy-entries' in existing_metas and
- str(existing_metas['network-policy-entries'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_network_policy_set
-
- def _ifmap_network_policy_create(self, obj_ids, obj_dict):
- if not 'parent_type' in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['network-policy']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_network_policy_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_network_policy_create
-
-
- def _ifmap_network_policy_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'virtual-network-network-policy', u'network-policy-entries', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_network_policy_read_to_meta_index
-
- def _ifmap_network_policy_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_network_policy_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['network-policy-entries', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_network_policy_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_network_policy_update
-
- def _ifmap_network_policy_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_network_policy_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_network_policy_delete
-
- def _ifmap_loadbalancer_pool_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.loadbalancer_pool_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_loadbalancer_pool_alloc
-
- def _ifmap_loadbalancer_pool_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('loadbalancer_pool_properties', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['loadbalancer_pool_properties']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- LoadbalancerPoolType(**field).exportChildren(buf, level = 1, name_ = 'loadbalancer-pool-properties', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'loadbalancer-pool-properties', pretty_print = False)
- loadbalancer_pool_properties_xml = buf.getvalue()
- buf.close()
- meta = Metadata('loadbalancer-pool-properties' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = loadbalancer_pool_properties_xml)
-
- if (existing_metas and 'loadbalancer-pool-properties' in existing_metas and
- str(existing_metas['loadbalancer-pool-properties'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('loadbalancer_pool_provider', None)
- if field is not None:
- norm_str = escape(str(obj_dict['loadbalancer_pool_provider']))
- meta = Metadata('loadbalancer-pool-provider', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'loadbalancer-pool-provider' in existing_metas and
- str(existing_metas['loadbalancer-pool-provider'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('service_instance_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'service-instance'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- loadbalancer_pool_service_instance_xml = ''
- meta = Metadata('loadbalancer-pool-service-instance' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = loadbalancer_pool_service_instance_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_machine_interface_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-machine-interface'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- loadbalancer_pool_virtual_machine_interface_xml = ''
- meta = Metadata('loadbalancer-pool-virtual-machine-interface' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = loadbalancer_pool_virtual_machine_interface_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('service_appliance_set_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'service-appliance-set'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- loadbalancer_pool_service_appliance_set_xml = ''
- meta = Metadata('loadbalancer-pool-service-appliance-set' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = loadbalancer_pool_service_appliance_set_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('loadbalancer_member_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'loadbalancer-member'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- loadbalancer_pool_loadbalancer_member_xml = ''
- meta = Metadata('loadbalancer-pool-loadbalancer-member' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = loadbalancer_pool_loadbalancer_member_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('loadbalancer_healthmonitor_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'loadbalancer-healthmonitor'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- loadbalancer_pool_loadbalancer_healthmonitor_xml = ''
- meta = Metadata('loadbalancer-pool-loadbalancer-healthmonitor' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = loadbalancer_pool_loadbalancer_healthmonitor_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_loadbalancer_pool_set
-
- def _ifmap_loadbalancer_pool_create(self, obj_ids, obj_dict):
- if not 'parent_type' in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['loadbalancer-pool']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_loadbalancer_pool_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_loadbalancer_pool_create
-
-
- def _ifmap_loadbalancer_pool_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'loadbalancer-pool-service-instance', u'loadbalancer-pool-virtual-machine-interface', u'loadbalancer-pool-service-appliance-set', u'loadbalancer-pool-loadbalancer-healthmonitor', u'virtual-ip-loadbalancer-pool', u'loadbalancer-pool-properties', u'loadbalancer-pool-provider', u'id-perms', u'display-name', u'loadbalancer-pool-loadbalancer-member']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_loadbalancer_pool_read_to_meta_index
-
- def _ifmap_loadbalancer_pool_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_loadbalancer_pool_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['loadbalancer-pool-properties', 'loadbalancer-pool-provider', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'loadbalancer-pool-service-instance': 'service-instance',
- 'loadbalancer-pool-virtual-machine-interface': 'virtual-machine-interface',
- 'loadbalancer-pool-service-appliance-set': 'service-appliance-set',
- 'loadbalancer-pool-loadbalancer-healthmonitor': 'loadbalancer-healthmonitor'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_loadbalancer_pool_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_loadbalancer_pool_update
-
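
The _ifmap_*_update handlers above all prune dropped references the same way: the reference ids already recorded in IF-MAP are diffed against the ids named in the incoming object dict, and only the leftovers are unlinked. A minimal standalone sketch of that computation, assuming illustrative names (find_stale_refs, to_imid, the sample data) that are not part of the original class:

def find_stale_refs(existing_metas, new_obj_dict, ref_map, fq_name_to_ifmap_id):
    """Return (neighbour-imid, 'contrail:<meta-name>') pairs that are no longer referenced."""
    delete_list = []
    for meta_name, to_type in ref_map.items():
        # ids currently linked under this metadata name, as read back from IF-MAP
        old_set = set(m['id'] for m in existing_metas.get(meta_name, []))
        # ids requested by the new object dict
        new_set = set(fq_name_to_ifmap_id(to_type, ref['to'])
                      for ref in new_obj_dict.get(to_type.replace('-', '_') + '_refs', []))
        for stale in old_set - new_set:
            delete_list.append((stale, 'contrail:' + meta_name))
    return delete_list

def to_imid(obj_type, fq_name):
    # stand-in for cfgm_common.imid.get_ifmap_id_from_fq_name
    return 'contrail:%s:%s' % (obj_type, ':'.join(fq_name))

# hypothetical usage: member 'b' was dropped from the pool, so only its link is deleted
existing = {'loadbalancer-pool-loadbalancer-member': [
    {'id': to_imid('loadbalancer-member', ['a']), 'meta': '<meta/>'},
    {'id': to_imid('loadbalancer-member', ['b']), 'meta': '<meta/>'}]}
new_obj = {'loadbalancer_member_refs': [{'to': ['a']}]}
print(find_stale_refs(existing, new_obj,
                      {'loadbalancer-pool-loadbalancer-member': 'loadbalancer-member'},
                      to_imid))
# -> [('contrail:loadbalancer-member:b', 'contrail:loadbalancer-pool-loadbalancer-member')]
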
- def _ifmap_loadbalancer_pool_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_loadbalancer_pool_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_loadbalancer_pool_delete
-
- def _ifmap_virtual_DNS_record_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.virtual_DNS_record_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_virtual_DNS_record_alloc
-
- def _ifmap_virtual_DNS_record_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('virtual_DNS_record_data', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['virtual_DNS_record_data']
- buf = cStringIO.StringIO()
-            # the value may already be a binding object (e.g. perms inserted at the server)
-            # rather than a dict; object construction differs from dict construction.
- if isinstance(field, dict):
- VirtualDnsRecordType(**field).exportChildren(buf, level = 1, name_ = 'virtual-DNS-record-data', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'virtual-DNS-record-data', pretty_print = False)
- virtual_DNS_record_data_xml = buf.getvalue()
- buf.close()
- meta = Metadata('virtual-DNS-record-data' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_DNS_record_data_xml)
-
- if (existing_metas and 'virtual-DNS-record-data' in existing_metas and
- str(existing_metas['virtual-DNS-record-data'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
-            # the value may already be a binding object (e.g. perms inserted at the server)
-            # rather than a dict; object construction differs from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_virtual_DNS_record_set
-
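
Every property above goes through the same dict-or-object branch before it is wrapped in a Metadata element: values that arrive straight from the REST layer are plain dicts and are fed through the generated binding class, while values already materialised at the server are binding objects and export themselves. A rough sketch of that branch, assuming a toy binding class (FakeIdPermsType) and helper (field_to_xml) that are not part of the generated code:

import io

class FakeIdPermsType(object):
    """Toy stand-in for a generated xsd binding such as IdPermsType."""
    def __init__(self, enable=True, description=''):
        self.enable = enable
        self.description = description

    def exportChildren(self, buf, level=1, name_='id-perms', pretty_print=False):
        # real bindings emit properly nested child elements; this toy version keeps it flat
        buf.write('<enable>%s</enable><description>%s</description>'
                  % (self.enable, self.description))

def field_to_xml(field, binding_cls, name_):
    """Accept either a plain dict or an already constructed binding object."""
    buf = io.StringIO()
    obj = binding_cls(**field) if isinstance(field, dict) else field
    obj.exportChildren(buf, level=1, name_=name_, pretty_print=False)
    xml = buf.getvalue()
    buf.close()
    return xml

# both call styles yield the same XML fragment
print(field_to_xml({'enable': True, 'description': 'demo'}, FakeIdPermsType, 'id-perms'))
print(field_to_xml(FakeIdPermsType(True, 'demo'), FakeIdPermsType, 'id-perms'))
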
- def _ifmap_virtual_DNS_record_create(self, obj_ids, obj_dict):
-        if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['virtual-DNS-record']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_virtual_DNS_record_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_virtual_DNS_record_create
-
-
- def _ifmap_virtual_DNS_record_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'virtual-DNS-record-data', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
-    #end _ifmap_virtual_DNS_record_read_to_meta_index
-
- def _ifmap_virtual_DNS_record_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_virtual_DNS_record_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['virtual-DNS-record-data', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_virtual_DNS_record_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_virtual_DNS_record_update
-
- def _ifmap_virtual_DNS_record_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_virtual_DNS_record_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_virtual_DNS_record_delete
-
- def _ifmap_route_target_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.route_target_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_route_target_alloc
-
- def _ifmap_route_target_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
-            # the value may already be a binding object (e.g. perms inserted at the server)
-            # rather than a dict; object construction differs from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_route_target_set
-
- def _ifmap_route_target_create(self, obj_ids, obj_dict):
- (ok, result) = self._ifmap_route_target_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_route_target_create
-
-
- def _ifmap_route_target_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'logical-router-target', u'instance-target', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
-    #end _ifmap_route_target_read_to_meta_index
-
- def _ifmap_route_target_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_route_target_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_route_target_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_route_target_update
-
- def _ifmap_route_target_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_route_target_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_route_target_delete
-
- def _ifmap_floating_ip_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.floating_ip_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_floating_ip_alloc
-
- def _ifmap_floating_ip_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('floating_ip_address', None)
- if field is not None:
- norm_str = escape(str(obj_dict['floating_ip_address']))
- meta = Metadata('floating-ip-address', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'floating-ip-address' in existing_metas and
- str(existing_metas['floating-ip-address'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('floating_ip_is_virtual_ip', None)
- if field is not None:
- norm_str = escape(str(obj_dict['floating_ip_is_virtual_ip']))
- meta = Metadata('floating-ip-is-virtual-ip', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'floating-ip-is-virtual-ip' in existing_metas and
- str(existing_metas['floating-ip-is-virtual-ip'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('floating_ip_fixed_ip_address', None)
- if field is not None:
- norm_str = escape(str(obj_dict['floating_ip_fixed_ip_address']))
- meta = Metadata('floating-ip-fixed-ip-address', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'floating-ip-fixed-ip-address' in existing_metas and
- str(existing_metas['floating-ip-fixed-ip-address'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('floating_ip_address_family', None)
- if field is not None:
- norm_str = escape(str(obj_dict['floating_ip_address_family']))
- meta = Metadata('floating-ip-address-family', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'floating-ip-address-family' in existing_metas and
- str(existing_metas['floating-ip-address-family'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
-            # the value may already be a binding object (e.g. perms inserted at the server)
-            # rather than a dict; object construction differs from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('project_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'project'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- floating_ip_project_xml = ''
- meta = Metadata('floating-ip-project' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = floating_ip_project_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_machine_interface_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-machine-interface'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- floating_ip_virtual_machine_interface_xml = ''
- meta = Metadata('floating-ip-virtual-machine-interface' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = floating_ip_virtual_machine_interface_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_floating_ip_set
-
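
Each _ifmap_*_set handler, _ifmap_floating_ip_set above included, compares the string form of a freshly built Metadata against the first entry already held under that name and only queues an update when they differ. A compact sketch of that guard, using an illustrative helper name (needs_publish) and plain strings in place of Metadata objects:

def needs_publish(existing_metas, meta_name, new_meta):
    """True when new_meta differs from what IF-MAP already holds under meta_name."""
    if existing_metas and meta_name in existing_metas:
        current = existing_metas[meta_name][0]['meta']
        if str(current) == str(new_meta):
            return False    # identical metadata: nothing to publish
    return True             # missing or changed: publish it

# hypothetical usage
existing = {'floating-ip-address':
            [{'meta': '<floating-ip-address>10.0.0.5</floating-ip-address>'}]}
print(needs_publish(existing, 'floating-ip-address',
                    '<floating-ip-address>10.0.0.5</floating-ip-address>'))  # False
print(needs_publish(existing, 'floating-ip-address',
                    '<floating-ip-address>10.0.0.6</floating-ip-address>'))  # True
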
- def _ifmap_floating_ip_create(self, obj_ids, obj_dict):
-        if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['floating-ip']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_floating_ip_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_floating_ip_create
-
-
- def _ifmap_floating_ip_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'floating-ip-project', u'floating-ip-virtual-machine-interface', u'customer-attachment-floating-ip', u'floating-ip-address', u'floating-ip-is-virtual-ip', u'floating-ip-fixed-ip-address', u'floating-ip-address-family', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
-    #end _ifmap_floating_ip_read_to_meta_index
-
- def _ifmap_floating_ip_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_floating_ip_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['floating-ip-address', 'floating-ip-is-virtual-ip', 'floating-ip-fixed-ip-address', 'floating-ip-address-family', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'floating-ip-project': 'project',
- 'floating-ip-virtual-machine-interface': 'virtual-machine-interface'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_floating_ip_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_floating_ip_update
-
- def _ifmap_floating_ip_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_floating_ip_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_floating_ip_delete
-
- def _ifmap_floating_ip_pool_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.floating_ip_pool_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_floating_ip_pool_alloc
-
- def _ifmap_floating_ip_pool_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('floating_ip_pool_prefixes', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['floating_ip_pool_prefixes']
- buf = cStringIO.StringIO()
-            # the value may already be a binding object (e.g. perms inserted at the server)
-            # rather than a dict; object construction differs from dict construction.
- if isinstance(field, dict):
- FloatingIpPoolType(**field).exportChildren(buf, level = 1, name_ = 'floating-ip-pool-prefixes', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'floating-ip-pool-prefixes', pretty_print = False)
- floating_ip_pool_prefixes_xml = buf.getvalue()
- buf.close()
- meta = Metadata('floating-ip-pool-prefixes' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = floating_ip_pool_prefixes_xml)
-
- if (existing_metas and 'floating-ip-pool-prefixes' in existing_metas and
- str(existing_metas['floating-ip-pool-prefixes'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
-            # the value may already be a binding object (e.g. perms inserted at the server)
-            # rather than a dict; object construction differs from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('floating_ip_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'floating-ip'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- floating_ip_pool_floating_ip_xml = ''
- meta = Metadata('floating-ip-pool-floating-ip' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = floating_ip_pool_floating_ip_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_floating_ip_pool_set
-
- def _ifmap_floating_ip_pool_create(self, obj_ids, obj_dict):
-        if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['floating-ip-pool']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_floating_ip_pool_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_floating_ip_pool_create
-
-
- def _ifmap_floating_ip_pool_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'project-floating-ip-pool', u'floating-ip-pool-prefixes', u'id-perms', u'display-name', u'floating-ip-pool-floating-ip']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
-    #end _ifmap_floating_ip_pool_read_to_meta_index
-
- def _ifmap_floating_ip_pool_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_floating_ip_pool_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['floating-ip-pool-prefixes', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_floating_ip_pool_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_floating_ip_pool_update
-
- def _ifmap_floating_ip_pool_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_floating_ip_pool_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_floating_ip_pool_delete
-
- def _ifmap_physical_router_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.physical_router_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_physical_router_alloc
-
- def _ifmap_physical_router_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('physical_router_management_ip', None)
- if field is not None:
- norm_str = escape(str(obj_dict['physical_router_management_ip']))
- meta = Metadata('physical-router-management-ip', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'physical-router-management-ip' in existing_metas and
- str(existing_metas['physical-router-management-ip'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('physical_router_dataplane_ip', None)
- if field is not None:
- norm_str = escape(str(obj_dict['physical_router_dataplane_ip']))
- meta = Metadata('physical-router-dataplane-ip', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'physical-router-dataplane-ip' in existing_metas and
- str(existing_metas['physical-router-dataplane-ip'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('physical_router_vendor_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['physical_router_vendor_name']))
- meta = Metadata('physical-router-vendor-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'physical-router-vendor-name' in existing_metas and
- str(existing_metas['physical-router-vendor-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('physical_router_product_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['physical_router_product_name']))
- meta = Metadata('physical-router-product-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'physical-router-product-name' in existing_metas and
- str(existing_metas['physical-router-product-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('physical_router_vnc_managed', None)
- if field is not None:
- norm_str = escape(str(obj_dict['physical_router_vnc_managed']))
- meta = Metadata('physical-router-vnc-managed', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'physical-router-vnc-managed' in existing_metas and
- str(existing_metas['physical-router-vnc-managed'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('physical_router_user_credentials', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['physical_router_user_credentials']
- buf = cStringIO.StringIO()
-            # the value may already be a binding object (e.g. perms inserted at the server)
-            # rather than a dict; object construction differs from dict construction.
- if isinstance(field, dict):
- UserCredentials(**field).exportChildren(buf, level = 1, name_ = 'physical-router-user-credentials', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'physical-router-user-credentials', pretty_print = False)
- physical_router_user_credentials_xml = buf.getvalue()
- buf.close()
- meta = Metadata('physical-router-user-credentials' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = physical_router_user_credentials_xml)
-
- if (existing_metas and 'physical-router-user-credentials' in existing_metas and
- str(existing_metas['physical-router-user-credentials'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('physical_router_snmp_credentials', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['physical_router_snmp_credentials']
- buf = cStringIO.StringIO()
-            # the value may already be a binding object (e.g. perms inserted at the server)
-            # rather than a dict; object construction differs from dict construction.
- if isinstance(field, dict):
- SNMPCredentials(**field).exportChildren(buf, level = 1, name_ = 'physical-router-snmp-credentials', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'physical-router-snmp-credentials', pretty_print = False)
- physical_router_snmp_credentials_xml = buf.getvalue()
- buf.close()
- meta = Metadata('physical-router-snmp-credentials' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = physical_router_snmp_credentials_xml)
-
- if (existing_metas and 'physical-router-snmp-credentials' in existing_metas and
- str(existing_metas['physical-router-snmp-credentials'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('physical_router_junos_service_ports', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['physical_router_junos_service_ports']
- buf = cStringIO.StringIO()
-            # the value may already be a binding object (e.g. perms inserted at the server)
-            # rather than a dict; object construction differs from dict construction.
- if isinstance(field, dict):
- JunosServicePorts(**field).exportChildren(buf, level = 1, name_ = 'physical-router-junos-service-ports', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'physical-router-junos-service-ports', pretty_print = False)
- physical_router_junos_service_ports_xml = buf.getvalue()
- buf.close()
- meta = Metadata('physical-router-junos-service-ports' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = physical_router_junos_service_ports_xml)
-
- if (existing_metas and 'physical-router-junos-service-ports' in existing_metas and
- str(existing_metas['physical-router-junos-service-ports'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
-            # the value may already be a binding object (e.g. perms inserted at the server)
-            # rather than a dict; object construction differs from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_router_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-router'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- physical_router_virtual_router_xml = ''
- meta = Metadata('physical-router-virtual-router' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = physical_router_virtual_router_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('bgp_router_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'bgp-router'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- physical_router_bgp_router_xml = ''
- meta = Metadata('physical-router-bgp-router' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = physical_router_bgp_router_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_network_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-network'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- physical_router_virtual_network_xml = ''
- meta = Metadata('physical-router-virtual-network' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = physical_router_virtual_network_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('physical_interface_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'physical-interface'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- physical_router_physical_interface_xml = ''
- meta = Metadata('physical-router-physical-interface' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = physical_router_physical_interface_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('logical_interface_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'logical-interface'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- physical_router_logical_interface_xml = ''
- meta = Metadata('physical-router-logical-interface' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = physical_router_logical_interface_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_physical_router_set
-
- def _ifmap_physical_router_create(self, obj_ids, obj_dict):
-        if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['physical-router']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_physical_router_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_physical_router_create
-
-
- def _ifmap_physical_router_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'physical-router-virtual-router', u'physical-router-bgp-router', u'physical-router-virtual-network', u'physical-router-management-ip', u'physical-router-dataplane-ip', u'physical-router-vendor-name', u'physical-router-product-name', u'physical-router-vnc-managed', u'physical-router-user-credentials', u'physical-router-snmp-credentials', u'physical-router-junos-service-ports', u'id-perms', u'display-name', u'physical-router-physical-interface', u'physical-router-logical-interface']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
-    #end _ifmap_physical_router_read_to_meta_index
-
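
The *_read_to_meta_index helpers, the physical-router one above included, answer reads from an in-memory _id_to_metas index: the requested field names are intersected with the metadata names known for the object type, and whatever the identity actually holds is returned. A sketch of that lookup against a hypothetical index, with read_metas and the sample identity as illustrative names:

def read_metas(id_to_metas, ifmap_id, all_metas, field_names=None):
    """Return {meta-name: [{'meta': ...} or {'id': ..., 'meta': ...}, ...]} for one identity."""
    wanted = all_metas if not field_names else set(all_metas) & set(field_names)
    per_id = id_to_metas.get(ifmap_id, {})
    return {name: per_id[name] for name in wanted if name in per_id}

# hypothetical index holding a single physical-router identity
index = {'contrail:physical-router:default-global-system-config:pr1': {
    'id-perms': [{'meta': '<id-perms/>'}],
    'physical-router-management-ip':
        [{'meta': '<physical-router-management-ip>192.0.2.1</physical-router-management-ip>'}]}}
print(read_metas(index, 'contrail:physical-router:default-global-system-config:pr1',
                 ['physical-router-management-ip', 'id-perms', 'display-name'],
                 field_names=['id-perms']))
# -> {'id-perms': [{'meta': '<id-perms/>'}]}
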
- def _ifmap_physical_router_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_physical_router_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['physical-router-management-ip', 'physical-router-dataplane-ip', 'physical-router-vendor-name', 'physical-router-product-name', 'physical-router-vnc-managed', 'physical-router-user-credentials', 'physical-router-snmp-credentials', 'physical-router-junos-service-ports', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'physical-router-virtual-router': 'virtual-router',
- 'physical-router-bgp-router': 'bgp-router',
- 'physical-router-virtual-network': 'virtual-network'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_physical_router_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_physical_router_update
-
- def _ifmap_physical_router_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_physical_router_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_physical_router_delete
-
- def _ifmap_bgp_router_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.bgp_router_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_bgp_router_alloc
-
- def _ifmap_bgp_router_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('bgp_router_parameters', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['bgp_router_parameters']
- buf = cStringIO.StringIO()
-            # the value may already be a binding object (e.g. perms inserted at the server)
-            # rather than a dict; object construction differs from dict construction.
- if isinstance(field, dict):
- BgpRouterParams(**field).exportChildren(buf, level = 1, name_ = 'bgp-router-parameters', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'bgp-router-parameters', pretty_print = False)
- bgp_router_parameters_xml = buf.getvalue()
- buf.close()
- meta = Metadata('bgp-router-parameters' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = bgp_router_parameters_xml)
-
- if (existing_metas and 'bgp-router-parameters' in existing_metas and
- str(existing_metas['bgp-router-parameters'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
-            # the value may already be a binding object (e.g. perms inserted at the server)
-            # rather than a dict; object construction differs from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('bgp_router_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'bgp-router'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- bgp_peering_xml = ''
- ref_data = ref['attr']
- if ref_data:
- buf = cStringIO.StringIO()
- BgpPeeringAttributes(**ref_data).exportChildren(buf, level = 1, name_ = 'bgp-peering', pretty_print = False)
- bgp_peering_xml = bgp_peering_xml + buf.getvalue()
- buf.close()
- meta = Metadata('bgp-peering' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = bgp_peering_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_bgp_router_set
-
- def _ifmap_bgp_router_create(self, obj_ids, obj_dict):
-        if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['bgp-router']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_bgp_router_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_bgp_router_create
-
-
- def _ifmap_bgp_router_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
-        all_metas = [u'bgp-peering', u'global-system-config-bgp-router', u'physical-router-bgp-router', u'virtual-router-bgp-router', u'bgp-router-parameters', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
-    #end _ifmap_bgp_router_read_to_meta_index
-
- def _ifmap_bgp_router_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_bgp_router_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['bgp-router-parameters', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'bgp-peering': 'bgp-router'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_bgp_router_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_bgp_router_update
-
- def _ifmap_bgp_router_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_bgp_router_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_bgp_router_delete
-
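
The delete handlers, _ifmap_bgp_router_delete above among them, first gather every (neighbour, metadata) link recorded for the identity, add the parent link when one exists, unlink the whole batch, and finally purge the identity's own property metadata. A standalone sketch of the list-building step, with build_unlink_list and the sample metas as illustrative stand-ins:

def build_unlink_list(existing_metas, parent_imid=None):
    """Collect (neighbour-imid, qualified-meta-name) pairs to delete before purging an identity."""
    unlink = [(info['id'], 'contrail:' + name)
              for name, infos in existing_metas.items()
              for info in infos
              if info.get('id') is not None]    # property metas carry no 'id' and are skipped
    if parent_imid:
        unlink.append((parent_imid, None))      # also drop the link up to the parent
    return unlink

# hypothetical usage
metas = {'bgp-peering': [{'id': 'contrail:bgp-router:peer1', 'meta': '<bgp-peering/>'}],
         'id-perms': [{'meta': '<id-perms/>'}]}
print(build_unlink_list(metas, parent_imid='contrail:routing-instance:default'))
# -> [('contrail:bgp-router:peer1', 'contrail:bgp-peering'),
#     ('contrail:routing-instance:default', None)]
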
- def _ifmap_virtual_router_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.virtual_router_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_virtual_router_alloc
-
- def _ifmap_virtual_router_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('virtual_router_type', None)
- if field is not None:
- norm_str = escape(str(obj_dict['virtual_router_type']))
- meta = Metadata('virtual-router-type', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'virtual-router-type' in existing_metas and
- str(existing_metas['virtual-router-type'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('virtual_router_ip_address', None)
- if field is not None:
- norm_str = escape(str(obj_dict['virtual_router_ip_address']))
- meta = Metadata('virtual-router-ip-address', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'virtual-router-ip-address' in existing_metas and
- str(existing_metas['virtual-router-ip-address'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
-            # the value may already be a binding object (e.g. perms inserted at the server)
-            # rather than a dict; object construction differs from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('bgp_router_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'bgp-router'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_router_bgp_router_xml = ''
- meta = Metadata('virtual-router-bgp-router' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_router_bgp_router_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_machine_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-machine'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_router_virtual_machine_xml = ''
- meta = Metadata('virtual-router-virtual-machine' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_router_virtual_machine_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_virtual_router_set
-
- def _ifmap_virtual_router_create(self, obj_ids, obj_dict):
-        if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['virtual-router']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_virtual_router_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_virtual_router_create
-
-
- def _ifmap_virtual_router_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'virtual-router-bgp-router', u'virtual-router-virtual-machine', u'physical-router-virtual-router', u'provider-attachment-virtual-router', u'virtual-router-type', u'virtual-router-ip-address', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_virtual_router_read_to_meta_index
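
Reads here never go back to the IF-MAP server; they are served from the in-memory _id_to_metas cache by intersecting the object type's full meta list with any requested field names (a dict in the original, a plain list in this sketch). A small self-contained illustration over made-up cache contents:

all_metas = [u'virtual-router-bgp-router', u'virtual-router-ip-address', u'id-perms', u'display-name']
id_to_metas = {
    # key: IF-MAP identity, value: meta-name -> list of {'meta': ..., 'id': ...} entries
    'contrail:virtual-router:default-global-system-config:host1': {
        'id-perms': [{'meta': '<id-perms/>'}],
        'virtual-router-bgp-router': [{'id': 'contrail:bgp-router:bgp1', 'meta': '<link/>'}],
    },
}

def read_to_meta_index(ifmap_id, field_names=None):
    # field_names = None means all fields will be read
    metas_to_read = all_metas if not field_names else set(all_metas) & set(field_names)
    metas = {}
    for meta_name in metas_to_read:
        if meta_name in id_to_metas[ifmap_id]:
            metas[meta_name] = id_to_metas[ifmap_id][meta_name]
    return metas

vr = 'contrail:virtual-router:default-global-system-config:host1'
print(sorted(read_to_meta_index(vr)))                                # everything cached
print(sorted(read_to_meta_index(vr, ['id-perms', 'display-name'])))  # only id-perms is present
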
-
- def _ifmap_virtual_router_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_virtual_router_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['virtual-router-type', 'virtual-router-ip-address', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'virtual-router-bgp-router': 'bgp-router',
- 'virtual-router-virtual-machine': 'virtual-machine'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_virtual_router_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_virtual_router_update
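
The update path prunes in two passes: properties that disappear from the new object dict are deleted by name, and references are diffed as sets of IF-MAP ids so only links that vanished get unlinked. A standalone sketch of the reference diff, with invented identifiers and a stand-in for the fq-name-to-id helper:

existing_metas = {
    'virtual-router-bgp-router': [
        {'id': 'contrail:bgp-router:default:ip-fabric:bgp1', 'meta': '<link/>'},
        {'id': 'contrail:bgp-router:default:ip-fabric:bgp2', 'meta': '<link/>'},
    ],
}
new_obj_dict = {'bgp_router_refs': [{'to': ['default', 'ip-fabric', 'bgp1']}]}

def fq_name_to_ifmap_id(obj_type, fq_name):
    # Hypothetical stand-in for the real fq-name to IF-MAP id helper.
    return 'contrail:%s:%s' % (obj_type, ':'.join(fq_name))

delete_list = []
refs = {'virtual-router-bgp-router': 'bgp-router'}
for meta, to_name in refs.items():
    old_set = set(m['id'] for m in existing_metas.get(meta, []))
    new_set = set(fq_name_to_ifmap_id(to_name, ref['to'])
                  for ref in new_obj_dict.get(to_name.replace('-', '_') + '_refs', []))
    for inact_ref in old_set - new_set:
        delete_list.append((inact_ref, 'contrail:' + meta))

print(delete_list)   # only bgp2 is stale and gets unlinked
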
-
- def _ifmap_virtual_router_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_virtual_router_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_virtual_router_delete
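
Deletion walks the cached metas once, collecting (referenced-id, qualified-meta-name) pairs for every link, appends the parent link with a None meta name (which the delete helper appears to treat as "all metas between this pair"), and finally wipes all self metadata. A compact sketch of how that meta_list is assembled from sample cache entries:

existing_metas = {
    'id-perms': [{'meta': '<id-perms/>'}],                      # property meta: no 'id' key
    'virtual-router-bgp-router': [
        {'id': 'contrail:bgp-router:bgp1', 'meta': '<link/>'},  # link meta: carries the peer id
    ],
}
parent_imid = 'contrail:global-system-config:default-global-system-config'

meta_list = []
for meta_name, meta_infos in existing_metas.items():
    for meta_info in meta_infos:
        ref_imid = meta_info.get('id')
        if ref_imid is None:
            continue              # property metas are handled by the final self-meta wipe
        meta_list.append((ref_imid, 'contrail:' + meta_name))

if parent_imid:
    meta_list.append((parent_imid, None))   # None: drop every link meta toward the parent

print(meta_list)
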
-
- def _ifmap_config_root_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.config_root_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_config_root_alloc
-
- def _ifmap_config_root_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('global_system_config_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'global-system-config'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- config_root_global_system_config_xml = ''
- meta = Metadata('config-root-global-system-config' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = config_root_global_system_config_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('domain_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'domain'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- config_root_domain_xml = ''
- meta = Metadata('config-root-domain' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = config_root_domain_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_config_root_set
-
- def _ifmap_config_root_create(self, obj_ids, obj_dict):
- (ok, result) = self._ifmap_config_root_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_config_root_create
-
-
- def _ifmap_config_root_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'id-perms', u'display-name', u'config-root-global-system-config', u'config-root-domain']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_config_root_read_to_meta_index
-
- def _ifmap_config_root_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_config_root_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_config_root_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_config_root_update
-
- def _ifmap_config_root_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_config_root_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_config_root_delete
-
- def _ifmap_subnet_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.subnet_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_subnet_alloc
-
- def _ifmap_subnet_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('subnet_ip_prefix', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['subnet_ip_prefix']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- SubnetType(**field).exportChildren(buf, level = 1, name_ = 'subnet-ip-prefix', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'subnet-ip-prefix', pretty_print = False)
- subnet_ip_prefix_xml = buf.getvalue()
- buf.close()
- meta = Metadata('subnet-ip-prefix' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = subnet_ip_prefix_xml)
-
- if (existing_metas and 'subnet-ip-prefix' in existing_metas and
- str(existing_metas['subnet-ip-prefix'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_machine_interface_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-machine-interface'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- subnet_virtual_machine_interface_xml = ''
- meta = Metadata('subnet-virtual-machine-interface' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = subnet_virtual_machine_interface_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_subnet_set
-
- def _ifmap_subnet_create(self, obj_ids, obj_dict):
- (ok, result) = self._ifmap_subnet_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_subnet_create
-
-
- def _ifmap_subnet_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'subnet-virtual-machine-interface', u'subnet-ip-prefix', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_subnet_read_to_meta_index
-
- def _ifmap_subnet_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_subnet_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['subnet-ip-prefix', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'subnet-virtual-machine-interface': 'virtual-machine-interface'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_subnet_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_subnet_update
-
- def _ifmap_subnet_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_subnet_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_subnet_delete
-
- def _ifmap_global_system_config_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.global_system_config_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_global_system_config_alloc
-
- def _ifmap_global_system_config_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('autonomous_system', None)
- if field is not None:
- norm_str = escape(str(obj_dict['autonomous_system']))
- meta = Metadata('autonomous-system', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'autonomous-system' in existing_metas and
- str(existing_metas['autonomous-system'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('config_version', None)
- if field is not None:
- norm_str = escape(str(obj_dict['config_version']))
- meta = Metadata('config-version', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'config-version' in existing_metas and
- str(existing_metas['config-version'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('plugin_tuning', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['plugin_tuning']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- PluginProperties(**field).exportChildren(buf, level = 1, name_ = 'plugin-tuning', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'plugin-tuning', pretty_print = False)
- plugin_tuning_xml = buf.getvalue()
- buf.close()
- meta = Metadata('plugin-tuning' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = plugin_tuning_xml)
-
- if (existing_metas and 'plugin-tuning' in existing_metas and
- str(existing_metas['plugin-tuning'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('ibgp_auto_mesh', None)
- if field is not None:
- norm_str = escape(str(obj_dict['ibgp_auto_mesh']))
- meta = Metadata('ibgp-auto-mesh', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'ibgp-auto-mesh' in existing_metas and
- str(existing_metas['ibgp-auto-mesh'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('ip_fabric_subnets', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['ip_fabric_subnets']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- SubnetListType(**field).exportChildren(buf, level = 1, name_ = 'ip-fabric-subnets', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'ip-fabric-subnets', pretty_print = False)
- ip_fabric_subnets_xml = buf.getvalue()
- buf.close()
- meta = Metadata('ip-fabric-subnets' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = ip_fabric_subnets_xml)
-
- if (existing_metas and 'ip-fabric-subnets' in existing_metas and
- str(existing_metas['ip-fabric-subnets'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('bgp_router_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'bgp-router'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- global_system_config_bgp_router_xml = ''
- meta = Metadata('global-system-config-bgp-router' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = global_system_config_bgp_router_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('global_vrouter_config_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'global-vrouter-config'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- global_system_config_global_vrouter_config_xml = ''
- meta = Metadata('global-system-config-global-vrouter-config' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = global_system_config_global_vrouter_config_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('physical_router_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'physical-router'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- global_system_config_physical_router_xml = ''
- meta = Metadata('global-system-config-physical-router' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = global_system_config_physical_router_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_router_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-router'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- global_system_config_virtual_router_xml = ''
- meta = Metadata('global-system-config-virtual-router' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = global_system_config_virtual_router_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('config_node_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'config-node'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- global_system_config_config_node_xml = ''
- meta = Metadata('global-system-config-config-node' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = global_system_config_config_node_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('analytics_node_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'analytics-node'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- global_system_config_analytics_node_xml = ''
- meta = Metadata('global-system-config-analytics-node' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = global_system_config_analytics_node_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('database_node_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'database-node'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- global_system_config_database_node_xml = ''
- meta = Metadata('global-system-config-database-node' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = global_system_config_database_node_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('service_appliance_set_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'service-appliance-set'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- global_system_config_service_appliance_set_xml = ''
- meta = Metadata('global-system-config-service-appliance-set' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = global_system_config_service_appliance_set_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_global_system_config_set
-
- def _ifmap_global_system_config_create(self, obj_ids, obj_dict):
- if not 'parent_type' in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['global-system-config']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_global_system_config_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_global_system_config_create
-
-
- def _ifmap_global_system_config_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'global-system-config-bgp-router', u'autonomous-system', u'config-version', u'plugin-tuning', u'ibgp-auto-mesh', u'ip-fabric-subnets', u'id-perms', u'display-name', u'global-system-config-global-vrouter-config', u'global-system-config-physical-router', u'global-system-config-virtual-router', u'global-system-config-config-node', u'global-system-config-analytics-node', u'global-system-config-database-node', u'global-system-config-service-appliance-set']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_global_system_config_read_to_meta_index
-
- def _ifmap_global_system_config_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_global_system_config_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['autonomous-system', 'config-version', 'plugin-tuning', 'ibgp-auto-mesh', 'ip-fabric-subnets', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'global-system-config-bgp-router': 'bgp-router'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_global_system_config_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_global_system_config_update
-
- def _ifmap_global_system_config_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_global_system_config_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_global_system_config_delete
-
- def _ifmap_service_appliance_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.service_appliance_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_service_appliance_alloc
-
- def _ifmap_service_appliance_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('service_appliance_user_credentials', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['service_appliance_user_credentials']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- UserCredentials(**field).exportChildren(buf, level = 1, name_ = 'service-appliance-user-credentials', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'service-appliance-user-credentials', pretty_print = False)
- service_appliance_user_credentials_xml = buf.getvalue()
- buf.close()
- meta = Metadata('service-appliance-user-credentials' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = service_appliance_user_credentials_xml)
-
- if (existing_metas and 'service-appliance-user-credentials' in existing_metas and
- str(existing_metas['service-appliance-user-credentials'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('service_appliance_ip_address', None)
- if field is not None:
- norm_str = escape(str(obj_dict['service_appliance_ip_address']))
- meta = Metadata('service-appliance-ip-address', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'service-appliance-ip-address' in existing_metas and
- str(existing_metas['service-appliance-ip-address'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('service_appliance_properties', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['service_appliance_properties']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- KeyValuePairs(**field).exportChildren(buf, level = 1, name_ = 'service-appliance-properties', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'service-appliance-properties', pretty_print = False)
- service_appliance_properties_xml = buf.getvalue()
- buf.close()
- meta = Metadata('service-appliance-properties' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = service_appliance_properties_xml)
-
- if (existing_metas and 'service-appliance-properties' in existing_metas and
- str(existing_metas['service-appliance-properties'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_service_appliance_set
-
- def _ifmap_service_appliance_create(self, obj_ids, obj_dict):
- if not 'parent_type' in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['service-appliance']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_service_appliance_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_service_appliance_create
-
-
- def _ifmap_service_appliance_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'service-appliance-user-credentials', u'service-appliance-ip-address', u'service-appliance-properties', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_service_appliance_read_to_meta_index
-
- def _ifmap_service_appliance_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_service_appliance_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['service-appliance-user-credentials', 'service-appliance-ip-address', 'service-appliance-properties', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_service_appliance_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_service_appliance_update
-
- def _ifmap_service_appliance_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_service_appliance_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_service_appliance_delete
-
- def _ifmap_service_instance_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.service_instance_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_service_instance_alloc
-
- def _ifmap_service_instance_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('service_instance_properties', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['service_instance_properties']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- ServiceInstanceType(**field).exportChildren(buf, level = 1, name_ = 'service-instance-properties', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'service-instance-properties', pretty_print = False)
- service_instance_properties_xml = buf.getvalue()
- buf.close()
- meta = Metadata('service-instance-properties' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = service_instance_properties_xml)
-
- if (existing_metas and 'service-instance-properties' in existing_metas and
- str(existing_metas['service-instance-properties'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('service_template_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'service-template'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- service_instance_service_template_xml = ''
- meta = Metadata('service-instance-service-template' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = service_instance_service_template_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_service_instance_set
-
- def _ifmap_service_instance_create(self, obj_ids, obj_dict):
- if not 'parent_type' in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['service-instance']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_service_instance_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_service_instance_create
-
-
- def _ifmap_service_instance_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'service-instance-service-template', u'virtual-machine-service-instance', u'logical-router-service-instance', u'loadbalancer-pool-service-instance', u'service-instance-properties', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_service_instance_read_to_meta_index
-
- def _ifmap_service_instance_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_service_instance_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['service-instance-properties', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'service-instance-service-template': 'service-template'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_service_instance_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_service_instance_update
-
- def _ifmap_service_instance_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_service_instance_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_service_instance_delete
-
- def _ifmap_namespace_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.namespace_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_namespace_alloc
-
- def _ifmap_namespace_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('namespace_cidr', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['namespace_cidr']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- SubnetType(**field).exportChildren(buf, level = 1, name_ = 'namespace-cidr', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'namespace-cidr', pretty_print = False)
- namespace_cidr_xml = buf.getvalue()
- buf.close()
- meta = Metadata('namespace-cidr' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = namespace_cidr_xml)
-
- if (existing_metas and 'namespace-cidr' in existing_metas and
- str(existing_metas['namespace-cidr'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_namespace_set
-
- def _ifmap_namespace_create(self, obj_ids, obj_dict):
- if not 'parent_type' in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['namespace']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_namespace_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_namespace_create
-
-
- def _ifmap_namespace_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'project-namespace', u'namespace-cidr', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_namespace_read_to_meta_index
-
- def _ifmap_namespace_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_namespace_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['namespace-cidr', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_namespace_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_namespace_update
-
- def _ifmap_namespace_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_namespace_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_namespace_delete
-
- def _ifmap_logical_interface_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.logical_interface_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_logical_interface_alloc
-
- def _ifmap_logical_interface_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('logical_interface_vlan_tag', None)
- if field is not None:
- norm_str = escape(str(obj_dict['logical_interface_vlan_tag']))
- meta = Metadata('logical-interface-vlan-tag', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'logical-interface-vlan-tag' in existing_metas and
- str(existing_metas['logical-interface-vlan-tag'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('logical_interface_type', None)
- if field is not None:
- norm_str = escape(str(obj_dict['logical_interface_type']))
- meta = Metadata('logical-interface-type', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'logical-interface-type' in existing_metas and
- str(existing_metas['logical-interface-type'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_machine_interface_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-machine-interface'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- logical_interface_virtual_machine_interface_xml = ''
- meta = Metadata('logical-interface-virtual-machine-interface' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = logical_interface_virtual_machine_interface_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_logical_interface_set
-
- def _ifmap_logical_interface_create(self, obj_ids, obj_dict):
- if not 'parent_type' in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['logical-interface']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_logical_interface_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_logical_interface_create
-
-
- def _ifmap_logical_interface_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'logical-interface-virtual-machine-interface', u'logical-interface-vlan-tag', u'logical-interface-type', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_logical_interface_read_to_meta_index
-
- def _ifmap_logical_interface_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_logical_interface_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['logical-interface-vlan-tag', 'logical-interface-type', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'logical-interface-virtual-machine-interface': 'virtual-machine-interface'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_logical_interface_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_logical_interface_update
-
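Each *_update method prunes stale links by diffing the refs already recorded in IF-MAP against the refs present in the new object; the same set arithmetic as a standalone, runnable sketch with hypothetical imids:

# Standalone sketch of the ref pruning performed in the *_update methods.
existing_refs = {'contrail:virtual-machine-interface:d:p:vmi1',
                 'contrail:virtual-machine-interface:d:p:vmi2'}
new_refs = {'contrail:virtual-machine-interface:d:p:vmi2'}
# Links present before the update but absent from the new object get deleted.
delete_list = [(imid, 'contrail:logical-interface-virtual-machine-interface')
               for imid in existing_refs - new_refs]
assert delete_list == [('contrail:virtual-machine-interface:d:p:vmi1',
                        'contrail:logical-interface-virtual-machine-interface')]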
- def _ifmap_logical_interface_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_logical_interface_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_logical_interface_delete
-
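The delete path removes link metadata first (a None meta name marks the link back to the parent) and then wipes all self metadata; a compact sketch of the list it hands to _delete_id_pair_meta_list, with hypothetical imids:

# Hypothetical meta_list built by _ifmap_logical_interface_delete.
meta_list = [
    ('contrail:virtual-machine-interface:d:p:vmi1',
     'contrail:logical-interface-virtual-machine-interface'),
    ('contrail:physical-interface:d:router1:ge-0/0/0', None),  # parent link
]
# Followed by _delete_id_self_meta(ifmap_id, None) to drop every property meta.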
- def _ifmap_route_table_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.route_table_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_route_table_alloc
-
- def _ifmap_route_table_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('routes', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['routes']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- RouteTableType(**field).exportChildren(buf, level = 1, name_ = 'routes', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'routes', pretty_print = False)
- routes_xml = buf.getvalue()
- buf.close()
- meta = Metadata('routes' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = routes_xml)
-
- if (existing_metas and 'routes' in existing_metas and
- str(existing_metas['routes'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_route_table_set
-
- def _ifmap_route_table_create(self, obj_ids, obj_dict):
- if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['route-table']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_route_table_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_route_table_create
-
-
- def _ifmap_route_table_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'virtual-network-route-table', u'routes', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_route_table_read_to_meta_index
-
- def _ifmap_route_table_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_route_table_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['routes', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_route_table_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_route_table_update
-
- def _ifmap_route_table_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_route_table_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_route_table_delete
-
- def _ifmap_physical_interface_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.physical_interface_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_physical_interface_alloc
-
- def _ifmap_physical_interface_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('logical_interface_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'logical-interface'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- physical_interface_logical_interface_xml = ''
- meta = Metadata('physical-interface-logical-interface' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = physical_interface_logical_interface_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_physical_interface_set
-
- def _ifmap_physical_interface_create(self, obj_ids, obj_dict):
- if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['physical-interface']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_physical_interface_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_physical_interface_create
-
-
- def _ifmap_physical_interface_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'id-perms', u'display-name', u'physical-interface-logical-interface']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_physical_interface_read_to_meta_index
-
- def _ifmap_physical_interface_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_physical_interface_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_physical_interface_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_physical_interface_update
-
- def _ifmap_physical_interface_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_physical_interface_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_physical_interface_delete
-
- def _ifmap_access_control_list_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.access_control_list_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_access_control_list_alloc
-
- def _ifmap_access_control_list_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('access_control_list_entries', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['access_control_list_entries']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- AclEntriesType(**field).exportChildren(buf, level = 1, name_ = 'access-control-list-entries', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'access-control-list-entries', pretty_print = False)
- access_control_list_entries_xml = buf.getvalue()
- buf.close()
- meta = Metadata('access-control-list-entries' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = access_control_list_entries_xml)
-
- if (existing_metas and 'access-control-list-entries' in existing_metas and
- str(existing_metas['access-control-list-entries'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_access_control_list_set
-
- def _ifmap_access_control_list_create(self, obj_ids, obj_dict):
- if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['access-control-list']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_access_control_list_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_access_control_list_create
-
-
- def _ifmap_access_control_list_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'access-control-list-entries', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_access_control_list_read_to_meta_index
-
- def _ifmap_access_control_list_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_access_control_list_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['access-control-list-entries', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_access_control_list_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_access_control_list_update
-
- def _ifmap_access_control_list_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_access_control_list_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_access_control_list_delete
-
- def _ifmap_analytics_node_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.analytics_node_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_analytics_node_alloc
-
- def _ifmap_analytics_node_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('analytics_node_ip_address', None)
- if field is not None:
- norm_str = escape(str(obj_dict['analytics_node_ip_address']))
- meta = Metadata('analytics-node-ip-address', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'analytics-node-ip-address' in existing_metas and
- str(existing_metas['analytics-node-ip-address'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_analytics_node_set
-
- def _ifmap_analytics_node_create(self, obj_ids, obj_dict):
- if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['analytics-node']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_analytics_node_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_analytics_node_create
-
-
- def _ifmap_analytics_node_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'analytics-node-ip-address', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_analytics_node_read_to_meta_index
-
- def _ifmap_analytics_node_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_analytics_node_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['analytics-node-ip-address', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_analytics_node_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_analytics_node_update
-
- def _ifmap_analytics_node_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_analytics_node_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_analytics_node_delete
-
- def _ifmap_virtual_DNS_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.virtual_DNS_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_virtual_DNS_alloc
-
- def _ifmap_virtual_DNS_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('virtual_DNS_data', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['virtual_DNS_data']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- VirtualDnsType(**field).exportChildren(buf, level = 1, name_ = 'virtual-DNS-data', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'virtual-DNS-data', pretty_print = False)
- virtual_DNS_data_xml = buf.getvalue()
- buf.close()
- meta = Metadata('virtual-DNS-data' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_DNS_data_xml)
-
- if (existing_metas and 'virtual-DNS-data' in existing_metas and
- str(existing_metas['virtual-DNS-data'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_DNS_record_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-DNS-record'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_DNS_virtual_DNS_record_xml = ''
- meta = Metadata('virtual-DNS-virtual-DNS-record' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_DNS_virtual_DNS_record_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_virtual_DNS_set
-
- def _ifmap_virtual_DNS_create(self, obj_ids, obj_dict):
- if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['virtual-DNS']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_virtual_DNS_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_virtual_DNS_create
-
-
- def _ifmap_virtual_DNS_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'network-ipam-virtual-DNS', u'virtual-DNS-data', u'id-perms', u'display-name', u'virtual-DNS-virtual-DNS-record']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_virtual_DNS_read_to_meta_index
-
- def _ifmap_virtual_DNS_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_virtual_DNS_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['virtual-DNS-data', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_virtual_DNS_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_virtual_DNS_update
-
- def _ifmap_virtual_DNS_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_virtual_DNS_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_virtual_DNS_delete
-
- def _ifmap_customer_attachment_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.customer_attachment_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_customer_attachment_alloc
-
- def _ifmap_customer_attachment_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('attachment_address', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['attachment_address']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- AttachmentAddressType(**field).exportChildren(buf, level = 1, name_ = 'attachment-address', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'attachment-address', pretty_print = False)
- attachment_address_xml = buf.getvalue()
- buf.close()
- meta = Metadata('attachment-address' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = attachment_address_xml)
-
- if (existing_metas and 'attachment-address' in existing_metas and
- str(existing_metas['attachment-address'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_machine_interface_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-machine-interface'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- customer_attachment_virtual_machine_interface_xml = ''
- meta = Metadata('customer-attachment-virtual-machine-interface' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = customer_attachment_virtual_machine_interface_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('floating_ip_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'floating-ip'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- customer_attachment_floating_ip_xml = ''
- meta = Metadata('customer-attachment-floating-ip' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = customer_attachment_floating_ip_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('routing_instance_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'routing-instance'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- binding_xml = ''
- ref_data = ref['attr']
- if ref_data:
- buf = cStringIO.StringIO()
- BindingType(**ref_data).exportChildren(buf, level = 1, name_ = 'binding', pretty_print = False)
- binding_xml = binding_xml + buf.getvalue()
- buf.close()
- meta = Metadata('binding' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = binding_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('provider_attachment_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'provider-attachment'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- attachment_info_xml = ''
- ref_data = ref['attr']
- if ref_data:
- buf = cStringIO.StringIO()
- AttachmentInfoType(**ref_data).exportChildren(buf, level = 1, name_ = 'attachment-info', pretty_print = False)
- attachment_info_xml = attachment_info_xml + buf.getvalue()
- buf.close()
- meta = Metadata('attachment-info' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = attachment_info_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_customer_attachment_set
-
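Unlike the plain refs handled elsewhere, the routing-instance and provider-attachment refs above carry an 'attr' payload that is serialized (via BindingType / AttachmentInfoType) into the link metadata; a sketch of the ref structure this code expects, with hypothetical fq-names and an empty attr shown for brevity:

# Hypothetical obj_dict fragment consumed by _ifmap_customer_attachment_set.
obj_dict = {
    'virtual_machine_interface_refs': [
        {'to': ['default-domain', 'proj1', 'vmi1']},            # no attr data
    ],
    'routing_instance_refs': [
        {'to': ['default-domain', 'proj1', 'vn1', 'vn1'],
         'attr': {}},  # keys, if given, must match BindingType's constructor
    ],
}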
- def _ifmap_customer_attachment_create(self, obj_ids, obj_dict):
- (ok, result) = self._ifmap_customer_attachment_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_customer_attachment_create
-
-
- def _ifmap_customer_attachment_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'customer-attachment-virtual-machine-interface', u'customer-attachment-floating-ip', u'attachment-address', u'id-perms', u'display-name', u'binding', u'attachment-info']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_customer_attachment_read_to_meta_index
-
- def _ifmap_customer_attachment_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_customer_attachment_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['attachment-address', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'customer-attachment-virtual-machine-interface': 'virtual-machine-interface',
- 'customer-attachment-floating-ip': 'floating-ip'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_customer_attachment_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_customer_attachment_update
-
- def _ifmap_customer_attachment_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_customer_attachment_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_customer_attachment_delete
-
- def _ifmap_service_appliance_set_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.service_appliance_set_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_service_appliance_set_alloc
-
- def _ifmap_service_appliance_set_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('service_appliance_set_properties', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['service_appliance_set_properties']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- KeyValuePairs(**field).exportChildren(buf, level = 1, name_ = 'service-appliance-set-properties', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'service-appliance-set-properties', pretty_print = False)
- service_appliance_set_properties_xml = buf.getvalue()
- buf.close()
- meta = Metadata('service-appliance-set-properties' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = service_appliance_set_properties_xml)
-
- if (existing_metas and 'service-appliance-set-properties' in existing_metas and
- str(existing_metas['service-appliance-set-properties'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('service_appliance_driver', None)
- if field is not None:
- norm_str = escape(str(obj_dict['service_appliance_driver']))
- meta = Metadata('service-appliance-driver', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'service-appliance-driver' in existing_metas and
- str(existing_metas['service-appliance-driver'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('service_appliance_ha_mode', None)
- if field is not None:
- norm_str = escape(str(obj_dict['service_appliance_ha_mode']))
- meta = Metadata('service-appliance-ha-mode', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'service-appliance-ha-mode' in existing_metas and
- str(existing_metas['service-appliance-ha-mode'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('service_appliance_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'service-appliance'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- service_appliance_set_service_appliance_xml = ''
- meta = Metadata('service-appliance-set-service-appliance' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = service_appliance_set_service_appliance_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_service_appliance_set_set
-
- def _ifmap_service_appliance_set_create(self, obj_ids, obj_dict):
- if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['service-appliance-set']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_service_appliance_set_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_service_appliance_set_create
-
-
- def _ifmap_service_appliance_set_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'loadbalancer-pool-service-appliance-set', u'service-appliance-set-properties', u'service-appliance-driver', u'service-appliance-ha-mode', u'id-perms', u'display-name', u'service-appliance-set-service-appliance']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_service_appliance_set_read_to_meta_index
-
- def _ifmap_service_appliance_set_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_service_appliance_set_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['service-appliance-set-properties', 'service-appliance-driver', 'service-appliance-ha-mode', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_service_appliance_set_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_service_appliance_set_update
-
- def _ifmap_service_appliance_set_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_service_appliance_set_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_service_appliance_set_delete
-
- def _ifmap_config_node_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.config_node_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_config_node_alloc
-
- def _ifmap_config_node_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('config_node_ip_address', None)
- if field is not None:
- norm_str = escape(str(obj_dict['config_node_ip_address']))
- meta = Metadata('config-node-ip-address', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'config-node-ip-address' in existing_metas and
- str(existing_metas['config-node-ip-address'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_config_node_set
-
- def _ifmap_config_node_create(self, obj_ids, obj_dict):
- if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['config-node']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_config_node_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_config_node_create
-
-
- def _ifmap_config_node_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'config-node-ip-address', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_config_node_read_to_meta_index
-
- def _ifmap_config_node_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_config_node_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['config-node-ip-address', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_config_node_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_config_node_update
-
- def _ifmap_config_node_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_config_node_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_config_node_delete
-
- def _ifmap_qos_queue_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.qos_queue_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_qos_queue_alloc
-
- def _ifmap_qos_queue_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('min_bandwidth', None)
- if field is not None:
- norm_str = escape(str(obj_dict['min_bandwidth']))
- meta = Metadata('min-bandwidth', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'min-bandwidth' in existing_metas and
- str(existing_metas['min-bandwidth'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('max_bandwidth', None)
- if field is not None:
- norm_str = escape(str(obj_dict['max_bandwidth']))
- meta = Metadata('max-bandwidth', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'max-bandwidth' in existing_metas and
- str(existing_metas['max-bandwidth'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_qos_queue_set
-
- def _ifmap_qos_queue_create(self, obj_ids, obj_dict):
- if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['qos-queue']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_qos_queue_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_qos_queue_create
-
-
- def _ifmap_qos_queue_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'qos-forwarding-class-qos-queue', u'min-bandwidth', u'max-bandwidth', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_qos_queue_read_to_meta_index
-
- def _ifmap_qos_queue_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_qos_queue_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['min-bandwidth', 'max-bandwidth', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_qos_queue_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_qos_queue_update
-
- def _ifmap_qos_queue_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_qos_queue_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_qos_queue_delete
-
- def _ifmap_virtual_machine_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.virtual_machine_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_virtual_machine_alloc
-
- def _ifmap_virtual_machine_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_machine_interface_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-machine-interface'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_machine_virtual_machine_interface_xml = ''
- meta = Metadata('virtual-machine-virtual-machine-interface' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_machine_virtual_machine_interface_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('service_instance_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'service-instance'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_machine_service_instance_xml = ''
- meta = Metadata('virtual-machine-service-instance' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_machine_service_instance_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_virtual_machine_set
-
- def _ifmap_virtual_machine_create(self, obj_ids, obj_dict):
- (ok, result) = self._ifmap_virtual_machine_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_virtual_machine_create
-
-
- def _ifmap_virtual_machine_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'virtual-machine-service-instance', u'virtual-machine-interface-virtual-machine', u'virtual-router-virtual-machine', u'id-perms', u'display-name', u'virtual-machine-virtual-machine-interface']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_virtual_machine_read_to_meta_index
-
- def _ifmap_virtual_machine_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_virtual_machine_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'virtual-machine-service-instance': 'service-instance'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_virtual_machine_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_virtual_machine_update
-
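- # Note: the *_delete methods collect every (referenced-imid, meta-name) pair
- # from the existing metadata, add the parent link (if any), delete those pair
- # metas, and finally clear all self metadata on the identity with
- # _delete_id_self_meta(ifmap_id, None).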
- def _ifmap_virtual_machine_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_virtual_machine_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_virtual_machine_delete
-
- def _ifmap_interface_route_table_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.interface_route_table_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_interface_route_table_alloc
-
- def _ifmap_interface_route_table_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('interface_route_table_routes', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['interface_route_table_routes']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- RouteTableType(**field).exportChildren(buf, level = 1, name_ = 'interface-route-table-routes', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'interface-route-table-routes', pretty_print = False)
- interface_route_table_routes_xml = buf.getvalue()
- buf.close()
- meta = Metadata('interface-route-table-routes' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = interface_route_table_routes_xml)
-
- if (existing_metas and 'interface-route-table-routes' in existing_metas and
- str(existing_metas['interface-route-table-routes'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_interface_route_table_set
-
- def _ifmap_interface_route_table_create(self, obj_ids, obj_dict):
- if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['interface-route-table']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_interface_route_table_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_interface_route_table_create
-
-
- def _ifmap_interface_route_table_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'virtual-machine-interface-route-table', u'interface-route-table-routes', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_interface_route_table_read_to_meta_index
-
- def _ifmap_interface_route_table_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_interface_route_table_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['interface-route-table-routes', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_interface_route_table_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_interface_route_table_update
-
- def _ifmap_interface_route_table_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_interface_route_table_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_interface_route_table_delete
-
- def _ifmap_service_template_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.service_template_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_service_template_alloc
-
- def _ifmap_service_template_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('service_template_properties', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['service_template_properties']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- ServiceTemplateType(**field).exportChildren(buf, level = 1, name_ = 'service-template-properties', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'service-template-properties', pretty_print = False)
- service_template_properties_xml = buf.getvalue()
- buf.close()
- meta = Metadata('service-template-properties' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = service_template_properties_xml)
-
- if (existing_metas and 'service-template-properties' in existing_metas and
- str(existing_metas['service-template-properties'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_service_template_set
-
- def _ifmap_service_template_create(self, obj_ids, obj_dict):
- if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['service-template']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_service_template_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_service_template_create
-
-
- def _ifmap_service_template_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'service-instance-service-template', u'service-template-properties', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_service_template_read_to_meta_index
-
- def _ifmap_service_template_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_service_template_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['service-template-properties', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_service_template_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_service_template_update
-
- def _ifmap_service_template_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_service_template_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_service_template_delete
-
- def _ifmap_virtual_ip_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.virtual_ip_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_virtual_ip_alloc
-
- def _ifmap_virtual_ip_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('virtual_ip_properties', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['virtual_ip_properties']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- VirtualIpType(**field).exportChildren(buf, level = 1, name_ = 'virtual-ip-properties', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'virtual-ip-properties', pretty_print = False)
- virtual_ip_properties_xml = buf.getvalue()
- buf.close()
- meta = Metadata('virtual-ip-properties' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_ip_properties_xml)
-
- if (existing_metas and 'virtual-ip-properties' in existing_metas and
- str(existing_metas['virtual-ip-properties'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('loadbalancer_pool_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'loadbalancer-pool'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_ip_loadbalancer_pool_xml = ''
- meta = Metadata('virtual-ip-loadbalancer-pool' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_ip_loadbalancer_pool_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_machine_interface_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-machine-interface'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_ip_virtual_machine_interface_xml = ''
- meta = Metadata('virtual-ip-virtual-machine-interface' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_ip_virtual_machine_interface_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_virtual_ip_set
-
- def _ifmap_virtual_ip_create(self, obj_ids, obj_dict):
- if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['virtual-ip']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_virtual_ip_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_virtual_ip_create
-
-
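- # Note: the *_read_to_meta_index helpers serve reads from the in-memory
- # self._id_to_metas cache; the requested field names are intersected with the
- # type's known meta names and returned as a meta-name -> list-of-meta dict.
- # The start_id Identity built below is not used by this cached implementation.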
- def _ifmap_virtual_ip_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'virtual-ip-loadbalancer-pool', u'virtual-ip-virtual-machine-interface', u'virtual-ip-properties', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_virtual_ip_read_to_meta_index
-
- def _ifmap_virtual_ip_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_virtual_ip_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['virtual-ip-properties', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'virtual-ip-loadbalancer-pool': 'loadbalancer-pool',
- 'virtual-ip-virtual-machine-interface': 'virtual-machine-interface'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_virtual_ip_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_virtual_ip_update
-
- def _ifmap_virtual_ip_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_virtual_ip_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_virtual_ip_delete
-
- def _ifmap_loadbalancer_member_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.loadbalancer_member_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_loadbalancer_member_alloc
-
- def _ifmap_loadbalancer_member_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('loadbalancer_member_properties', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['loadbalancer_member_properties']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- LoadbalancerMemberType(**field).exportChildren(buf, level = 1, name_ = 'loadbalancer-member-properties', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'loadbalancer-member-properties', pretty_print = False)
- loadbalancer_member_properties_xml = buf.getvalue()
- buf.close()
- meta = Metadata('loadbalancer-member-properties' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = loadbalancer_member_properties_xml)
-
- if (existing_metas and 'loadbalancer-member-properties' in existing_metas and
- str(existing_metas['loadbalancer-member-properties'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_loadbalancer_member_set
-
- def _ifmap_loadbalancer_member_create(self, obj_ids, obj_dict):
- if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['loadbalancer-member']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_loadbalancer_member_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_loadbalancer_member_create
-
-
- def _ifmap_loadbalancer_member_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'loadbalancer-member-properties', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_loadbalancer_member_read_to_meta_index
-
- def _ifmap_loadbalancer_member_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_loadbalancer_member_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['loadbalancer-member-properties', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_loadbalancer_member_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_loadbalancer_member_update
-
- def _ifmap_loadbalancer_member_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_loadbalancer_member_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_loadbalancer_member_delete
-
- def _ifmap_security_group_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.security_group_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_security_group_alloc
-
- def _ifmap_security_group_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('security_group_id', None)
- if field is not None:
- norm_str = escape(str(obj_dict['security_group_id']))
- meta = Metadata('security-group-id', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'security-group-id' in existing_metas and
- str(existing_metas['security-group-id'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('configured_security_group_id', None)
- if field is not None:
- norm_str = escape(str(obj_dict['configured_security_group_id']))
- meta = Metadata('configured-security-group-id', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'configured-security-group-id' in existing_metas and
- str(existing_metas['configured-security-group-id'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('security_group_entries', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['security_group_entries']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- PolicyEntriesType(**field).exportChildren(buf, level = 1, name_ = 'security-group-entries', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'security-group-entries', pretty_print = False)
- security_group_entries_xml = buf.getvalue()
- buf.close()
- meta = Metadata('security-group-entries' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = security_group_entries_xml)
-
- if (existing_metas and 'security-group-entries' in existing_metas and
- str(existing_metas['security-group-entries'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('access_control_list_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'access-control-list'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- security_group_access_control_list_xml = ''
- meta = Metadata('security-group-access-control-list' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = security_group_access_control_list_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_security_group_set
-
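- # Note: most generated *_create methods publish a parent link meta (defaulting
- # the parent to config-root when obj_dict carries no 'parent_type') and then
- # call the matching *_set with existing_metas=None; a few types (e.g.
- # virtual-machine above) skip the parent link and call *_set directly.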
- def _ifmap_security_group_create(self, obj_ids, obj_dict):
- if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['security-group']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_security_group_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_security_group_create
-
-
- def _ifmap_security_group_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'virtual-machine-interface-security-group', u'security-group-id', u'configured-security-group-id', u'security-group-entries', u'id-perms', u'display-name', u'security-group-access-control-list']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_security_group_read_to_meta_index
-
- def _ifmap_security_group_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_security_group_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['security-group-id', 'configured-security-group-id', 'security-group-entries', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_security_group_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_security_group_update
-
- def _ifmap_security_group_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_security_group_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_security_group_delete
-
- def _ifmap_provider_attachment_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.provider_attachment_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_provider_attachment_alloc
-
- def _ifmap_provider_attachment_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_router_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-router'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- provider_attachment_virtual_router_xml = ''
- meta = Metadata('provider-attachment-virtual-router' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = provider_attachment_virtual_router_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_provider_attachment_set
-
- def _ifmap_provider_attachment_create(self, obj_ids, obj_dict):
- (ok, result) = self._ifmap_provider_attachment_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_provider_attachment_create
-
-
- def _ifmap_provider_attachment_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'provider-attachment-virtual-router', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_provider_attachment_read_to_meta_index
-
- def _ifmap_provider_attachment_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_provider_attachment_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'provider-attachment-virtual-router': 'virtual-router'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_provider_attachment_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_provider_attachment_update
-
- def _ifmap_provider_attachment_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_provider_attachment_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_provider_attachment_delete
-
- def _ifmap_virtual_machine_interface_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.virtual_machine_interface_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_virtual_machine_interface_alloc
-
- def _ifmap_virtual_machine_interface_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('virtual_machine_interface_mac_addresses', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['virtual_machine_interface_mac_addresses']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- MacAddressesType(**field).exportChildren(buf, level = 1, name_ = 'virtual-machine-interface-mac-addresses', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'virtual-machine-interface-mac-addresses', pretty_print = False)
- virtual_machine_interface_mac_addresses_xml = buf.getvalue()
- buf.close()
- meta = Metadata('virtual-machine-interface-mac-addresses' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_machine_interface_mac_addresses_xml)
-
- if (existing_metas and 'virtual-machine-interface-mac-addresses' in existing_metas and
- str(existing_metas['virtual-machine-interface-mac-addresses'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('virtual_machine_interface_dhcp_option_list', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['virtual_machine_interface_dhcp_option_list']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- DhcpOptionsListType(**field).exportChildren(buf, level = 1, name_ = 'virtual-machine-interface-dhcp-option-list', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'virtual-machine-interface-dhcp-option-list', pretty_print = False)
- virtual_machine_interface_dhcp_option_list_xml = buf.getvalue()
- buf.close()
- meta = Metadata('virtual-machine-interface-dhcp-option-list' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_machine_interface_dhcp_option_list_xml)
-
- if (existing_metas and 'virtual-machine-interface-dhcp-option-list' in existing_metas and
- str(existing_metas['virtual-machine-interface-dhcp-option-list'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('virtual_machine_interface_host_routes', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['virtual_machine_interface_host_routes']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- RouteTableType(**field).exportChildren(buf, level = 1, name_ = 'virtual-machine-interface-host-routes', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'virtual-machine-interface-host-routes', pretty_print = False)
- virtual_machine_interface_host_routes_xml = buf.getvalue()
- buf.close()
- meta = Metadata('virtual-machine-interface-host-routes' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_machine_interface_host_routes_xml)
-
- if (existing_metas and 'virtual-machine-interface-host-routes' in existing_metas and
- str(existing_metas['virtual-machine-interface-host-routes'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('virtual_machine_interface_allowed_address_pairs', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['virtual_machine_interface_allowed_address_pairs']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- AllowedAddressPairs(**field).exportChildren(buf, level = 1, name_ = 'virtual-machine-interface-allowed-address-pairs', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'virtual-machine-interface-allowed-address-pairs', pretty_print = False)
- virtual_machine_interface_allowed_address_pairs_xml = buf.getvalue()
- buf.close()
- meta = Metadata('virtual-machine-interface-allowed-address-pairs' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_machine_interface_allowed_address_pairs_xml)
-
- if (existing_metas and 'virtual-machine-interface-allowed-address-pairs' in existing_metas and
- str(existing_metas['virtual-machine-interface-allowed-address-pairs'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('vrf_assign_table', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['vrf_assign_table']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- VrfAssignTableType(**field).exportChildren(buf, level = 1, name_ = 'vrf-assign-table', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'vrf-assign-table', pretty_print = False)
- vrf_assign_table_xml = buf.getvalue()
- buf.close()
- meta = Metadata('vrf-assign-table' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = vrf_assign_table_xml)
-
- if (existing_metas and 'vrf-assign-table' in existing_metas and
- str(existing_metas['vrf-assign-table'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('virtual_machine_interface_device_owner', None)
- if field is not None:
- norm_str = escape(str(obj_dict['virtual_machine_interface_device_owner']))
- meta = Metadata('virtual-machine-interface-device-owner', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'virtual-machine-interface-device-owner' in existing_metas and
- str(existing_metas['virtual-machine-interface-device-owner'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('virtual_machine_interface_properties', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['virtual_machine_interface_properties']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- VirtualMachineInterfacePropertiesType(**field).exportChildren(buf, level = 1, name_ = 'virtual-machine-interface-properties', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'virtual-machine-interface-properties', pretty_print = False)
- virtual_machine_interface_properties_xml = buf.getvalue()
- buf.close()
- meta = Metadata('virtual-machine-interface-properties' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_machine_interface_properties_xml)
-
- if (existing_metas and 'virtual-machine-interface-properties' in existing_metas and
- str(existing_metas['virtual-machine-interface-properties'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('qos_forwarding_class_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'qos-forwarding-class'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_machine_interface_qos_forwarding_class_xml = ''
- meta = Metadata('virtual-machine-interface-qos-forwarding-class' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_machine_interface_qos_forwarding_class_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('security_group_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'security-group'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_machine_interface_security_group_xml = ''
- meta = Metadata('virtual-machine-interface-security-group' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_machine_interface_security_group_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_machine_interface_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-machine-interface'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_machine_interface_sub_interface_xml = ''
- meta = Metadata('virtual-machine-interface-sub-interface' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_machine_interface_sub_interface_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_machine_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-machine'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_machine_interface_virtual_machine_xml = ''
- meta = Metadata('virtual-machine-interface-virtual-machine' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_machine_interface_virtual_machine_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_network_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-network'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_machine_interface_virtual_network_xml = ''
- meta = Metadata('virtual-machine-interface-virtual-network' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_machine_interface_virtual_network_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('routing_instance_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'routing-instance'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_machine_interface_routing_instance_xml = ''
- ref_data = ref['attr']
- if ref_data:
- buf = cStringIO.StringIO()
- PolicyBasedForwardingRuleType(**ref_data).exportChildren(buf, level = 1, name_ = 'virtual-machine-interface-routing-instance', pretty_print = False)
- virtual_machine_interface_routing_instance_xml = virtual_machine_interface_routing_instance_xml + buf.getvalue()
- buf.close()
- meta = Metadata('virtual-machine-interface-routing-instance' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_machine_interface_routing_instance_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('interface_route_table_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'interface-route-table'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_machine_interface_route_table_xml = ''
- meta = Metadata('virtual-machine-interface-route-table' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_machine_interface_route_table_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_virtual_machine_interface_set
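The generated _set methods above repeat one pattern for every complex property: the value can arrive either as a plain dict or as an already-constructed xsd object, its children are serialized to an XML fragment, and that fragment is wrapped in a Metadata element which is published only if its string form differs from what IF-MAP already holds. Below is a minimal standalone sketch of the dict-or-object serialization step; ExampleType and field_to_xml are hypothetical stand-ins, not the generated MacAddressesType/Metadata classes, and io.StringIO replaces the module's cStringIO.

# Illustrative sketch only -- ExampleType is a hypothetical stand-in for the
# generated xsd types (MacAddressesType, DhcpOptionsListType, ...).
from io import StringIO   # the generated code uses cStringIO on Python 2

class ExampleType(object):
    def __init__(self, **kwargs):
        self.kwargs = kwargs
    def exportChildren(self, buf, level=1, name_='', pretty_print=False):
        # toy serialization: one child element per keyword argument
        for k, v in sorted(self.kwargs.items()):
            buf.write('<%s>%s</%s>' % (k, v, k))

def field_to_xml(field, xsd_type, name):
    """Serialize a property given either as a dict or as an xsd object."""
    buf = StringIO()
    obj = xsd_type(**field) if isinstance(field, dict) else field
    obj.exportChildren(buf, level=1, name_=name, pretty_print=False)
    xml = buf.getvalue()
    buf.close()
    return xml

# both input forms yield the same XML payload
xml_from_dict = field_to_xml({'mac_address': '02:aa:bb:cc:dd:ee'}, ExampleType, 'x')
xml_from_obj = field_to_xml(ExampleType(mac_address='02:aa:bb:cc:dd:ee'), ExampleType, 'x')
assert xml_from_dict == xml_from_obj == '<mac_address>02:aa:bb:cc:dd:ee</mac_address>'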
-
- def _ifmap_virtual_machine_interface_create(self, obj_ids, obj_dict):
-        if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['virtual-machine-interface']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_virtual_machine_interface_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_virtual_machine_interface_create
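The _create methods above only add the parent link: they look up the link-meta name for the (parent type, child type) pair in self._parent_metas and delegate all property and reference handling to the corresponding _set method. A small sketch of that lookup; the table contents shown here are illustrative guesses, not the real generated table.

# Sketch of the parent-link lookup; _parent_metas contents are made up.
_parent_metas = {
    'project': {'virtual-machine-interface': 'project-virtual-machine-interface'},
    'config-root': {'project': 'config-root-project'},
}
obj_dict = {'parent_type': 'project'}
parent_type = obj_dict.get('parent_type', 'config-root')
parent_link_meta = _parent_metas[parent_type]['virtual-machine-interface']
assert parent_link_meta == 'project-virtual-machine-interface'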
-
-
- def _ifmap_virtual_machine_interface_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'virtual-machine-interface-qos-forwarding-class', u'virtual-machine-interface-security-group', u'virtual-machine-interface-sub-interface', u'virtual-machine-interface-virtual-machine', u'virtual-machine-interface-virtual-network', u'virtual-machine-interface-routing-instance', u'virtual-machine-interface-route-table', u'virtual-machine-interface-sub-interface', u'instance-ip-virtual-machine-interface', u'subnet-virtual-machine-interface', u'floating-ip-virtual-machine-interface', u'logical-interface-virtual-machine-interface', u'customer-attachment-virtual-machine-interface', u'logical-router-interface', u'loadbalancer-pool-virtual-machine-interface', u'virtual-ip-virtual-machine-interface', u'virtual-machine-interface-mac-addresses', u'virtual-machine-interface-dhcp-option-list', u'virtual-machine-interface-host-routes', u'virtual-machine-interface-allowed-address-pairs', u'vrf-assign-table', u'virtual-machine-interface-device-owner', u'virtual-machine-interface-properties', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
-    #end _ifmap_virtual_machine_interface_read_to_meta_index
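The _read_to_meta_index methods above implement field selection with a plain set intersection: field_names=None returns every known meta, otherwise only the requested ones are read. A tiny sketch with made-up field names:

# Sketch of the field filtering used by the *_read_to_meta_index methods.
all_metas = ['id-perms', 'display-name', 'virtual-machine-interface-mac-addresses']
field_names = {'display-name': True}      # caller asked for a single field
if not field_names:
    metas_to_read = all_metas
else:
    metas_to_read = set(all_metas) & set(field_names.keys())
assert metas_to_read == {'display-name'}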
-
- def _ifmap_virtual_machine_interface_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_virtual_machine_interface_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['virtual-machine-interface-mac-addresses', 'virtual-machine-interface-dhcp-option-list', 'virtual-machine-interface-host-routes', 'virtual-machine-interface-allowed-address-pairs', 'vrf-assign-table', 'virtual-machine-interface-device-owner', 'virtual-machine-interface-properties', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'virtual-machine-interface-qos-forwarding-class': 'qos-forwarding-class',
- 'virtual-machine-interface-security-group': 'security-group',
- 'virtual-machine-interface-sub-interface': 'virtual-machine-interface',
- 'virtual-machine-interface-virtual-machine': 'virtual-machine',
- 'virtual-machine-interface-virtual-network': 'virtual-network',
- 'virtual-machine-interface-routing-instance': 'routing-instance',
- 'virtual-machine-interface-route-table': 'interface-route-table'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_virtual_machine_interface_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_virtual_machine_interface_update
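The _update methods above decide which links have gone stale purely by set arithmetic: the ifmap ids currently linked (from existing_metas) minus the ids derived from the new object dict give the links to delete. A self-contained sketch with made-up identifiers; fq_name_to_ifmap_id is a simplified stand-in for cfgm_common.imid.get_ifmap_id_from_fq_name, so the id format is illustrative only.

# Sketch of the stale-ref computation; all names and ids are made up.
existing_refs = {
    'virtual-machine-interface-virtual-network': [
        {'id': 'contrail:virtual-network:default-domain:demo:net-a', 'meta': '<...>'},
        {'id': 'contrail:virtual-network:default-domain:demo:net-b', 'meta': '<...>'},
    ],
}
new_obj_dict = {
    'virtual_network_refs': [
        {'to': ['default-domain', 'demo', 'net-a']},
    ],
}

def fq_name_to_ifmap_id(obj_type, fq_name):
    # stand-in for cfgm_common.imid.get_ifmap_id_from_fq_name()
    return 'contrail:%s:%s' % (obj_type, ':'.join(fq_name))

old_set = {m['id'] for m in existing_refs.get('virtual-machine-interface-virtual-network', [])}
new_set = {fq_name_to_ifmap_id('virtual-network', ref['to'])
           for ref in new_obj_dict.get('virtual_network_refs', [])}
# net-b is no longer referenced, so its link metadata is scheduled for deletion
stale = old_set - new_set
assert stale == {'contrail:virtual-network:default-domain:demo:net-b'}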
-
- def _ifmap_virtual_machine_interface_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_virtual_machine_interface_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_virtual_machine_interface_delete
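The _delete methods above collect one (peer ifmap id, meta name) pair per link meta, skip property metas (which carry no 'id' key), append the parent link, and finally drop all self metadata in one call. A sketch of the collection step over made-up data:

# Sketch of how the *_delete methods assemble the unlink list; data is made up.
existing_metas = {
    'id-perms': [{'meta': '<...>'}],                                  # self meta, no 'id'
    'virtual-machine-interface-virtual-network':
        [{'id': 'contrail:virtual-network:x', 'meta': '<...>'}],      # link meta
}
parent_imid = 'contrail:project:default-domain:demo'
meta_list = []
for meta_name, meta_infos in existing_metas.items():
    for meta_info in meta_infos:
        ref_imid = meta_info.get('id')
        if ref_imid is None:
            continue
        meta_list.append((ref_imid, 'contrail:' + meta_name))
if parent_imid:
    meta_list.append((parent_imid, None))
assert ('contrail:virtual-network:x',
        'contrail:virtual-machine-interface-virtual-network') in meta_list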
-
- def _ifmap_loadbalancer_healthmonitor_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.loadbalancer_healthmonitor_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_loadbalancer_healthmonitor_alloc
-
- def _ifmap_loadbalancer_healthmonitor_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('loadbalancer_healthmonitor_properties', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['loadbalancer_healthmonitor_properties']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- LoadbalancerHealthmonitorType(**field).exportChildren(buf, level = 1, name_ = 'loadbalancer-healthmonitor-properties', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'loadbalancer-healthmonitor-properties', pretty_print = False)
- loadbalancer_healthmonitor_properties_xml = buf.getvalue()
- buf.close()
- meta = Metadata('loadbalancer-healthmonitor-properties' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = loadbalancer_healthmonitor_properties_xml)
-
- if (existing_metas and 'loadbalancer-healthmonitor-properties' in existing_metas and
- str(existing_metas['loadbalancer-healthmonitor-properties'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_loadbalancer_healthmonitor_set
-
- def _ifmap_loadbalancer_healthmonitor_create(self, obj_ids, obj_dict):
-        if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['loadbalancer-healthmonitor']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_loadbalancer_healthmonitor_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_loadbalancer_healthmonitor_create
-
-
- def _ifmap_loadbalancer_healthmonitor_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'loadbalancer-pool-loadbalancer-healthmonitor', u'loadbalancer-healthmonitor-properties', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
-    #end _ifmap_loadbalancer_healthmonitor_read_to_meta_index
-
- def _ifmap_loadbalancer_healthmonitor_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_loadbalancer_healthmonitor_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['loadbalancer-healthmonitor-properties', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_loadbalancer_healthmonitor_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_loadbalancer_healthmonitor_update
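The property clean-up at the top of each _update method relies on a simple naming convention: IF-MAP meta names are hyphenated while the REST object dict uses underscores, so prop.replace('-', '_') maps one to the other, and anything present in IF-MAP but absent from the new dict is deleted. A short sketch with made-up values:

# Sketch of the property clean-up step in the *_update methods above.
existing_metas = {'display-name': [{'meta': '<...>'}], 'id-perms': [{'meta': '<...>'}]}
new_obj_dict = {'id_perms': {'enable': True}}          # display_name was dropped
props = ['id-perms', 'display-name']
to_delete = [p for p in props
             if p in existing_metas and p.replace('-', '_') not in new_obj_dict]
assert to_delete == ['display-name']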
-
- def _ifmap_loadbalancer_healthmonitor_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_loadbalancer_healthmonitor_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_loadbalancer_healthmonitor_delete
-
- def _ifmap_virtual_network_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.virtual_network_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_virtual_network_alloc
-
- def _ifmap_virtual_network_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('virtual_network_properties', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['virtual_network_properties']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- VirtualNetworkType(**field).exportChildren(buf, level = 1, name_ = 'virtual-network-properties', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'virtual-network-properties', pretty_print = False)
- virtual_network_properties_xml = buf.getvalue()
- buf.close()
- meta = Metadata('virtual-network-properties' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_network_properties_xml)
-
- if (existing_metas and 'virtual-network-properties' in existing_metas and
- str(existing_metas['virtual-network-properties'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('virtual_network_network_id', None)
- if field is not None:
- norm_str = escape(str(obj_dict['virtual_network_network_id']))
- meta = Metadata('virtual-network-network-id', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'virtual-network-network-id' in existing_metas and
- str(existing_metas['virtual-network-network-id'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('route_target_list', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['route_target_list']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- RouteTargetList(**field).exportChildren(buf, level = 1, name_ = 'route-target-list', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'route-target-list', pretty_print = False)
- route_target_list_xml = buf.getvalue()
- buf.close()
- meta = Metadata('route-target-list' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = route_target_list_xml)
-
- if (existing_metas and 'route-target-list' in existing_metas and
- str(existing_metas['route-target-list'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('router_external', None)
- if field is not None:
- norm_str = escape(str(obj_dict['router_external']))
- meta = Metadata('router-external', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'router-external' in existing_metas and
- str(existing_metas['router-external'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('is_shared', None)
- if field is not None:
- norm_str = escape(str(obj_dict['is_shared']))
- meta = Metadata('is-shared', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'is-shared' in existing_metas and
- str(existing_metas['is-shared'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('external_ipam', None)
- if field is not None:
- norm_str = escape(str(obj_dict['external_ipam']))
- meta = Metadata('external-ipam', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'external-ipam' in existing_metas and
- str(existing_metas['external-ipam'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('flood_unknown_unicast', None)
- if field is not None:
- norm_str = escape(str(obj_dict['flood_unknown_unicast']))
- meta = Metadata('flood-unknown-unicast', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'flood-unknown-unicast' in existing_metas and
- str(existing_metas['flood-unknown-unicast'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('qos_forwarding_class_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'qos-forwarding-class'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_network_qos_forwarding_class_xml = ''
- meta = Metadata('virtual-network-qos-forwarding-class' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_network_qos_forwarding_class_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('network_ipam_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'network-ipam'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_network_network_ipam_xml = ''
- ref_data = ref['attr']
- if ref_data:
- buf = cStringIO.StringIO()
- VnSubnetsType(**ref_data).exportChildren(buf, level = 1, name_ = 'virtual-network-network-ipam', pretty_print = False)
- virtual_network_network_ipam_xml = virtual_network_network_ipam_xml + buf.getvalue()
- buf.close()
- meta = Metadata('virtual-network-network-ipam' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_network_network_ipam_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('network_policy_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'network-policy'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_network_network_policy_xml = ''
- ref_data = ref['attr']
- if ref_data:
- buf = cStringIO.StringIO()
- VirtualNetworkPolicyType(**ref_data).exportChildren(buf, level = 1, name_ = 'virtual-network-network-policy', pretty_print = False)
- virtual_network_network_policy_xml = virtual_network_network_policy_xml + buf.getvalue()
- buf.close()
- meta = Metadata('virtual-network-network-policy' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_network_network_policy_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('access_control_list_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'access-control-list'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_network_access_control_list_xml = ''
- meta = Metadata('virtual-network-access-control-list' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_network_access_control_list_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('floating_ip_pool_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'floating-ip-pool'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_network_floating_ip_pool_xml = ''
- meta = Metadata('virtual-network-floating-ip-pool' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_network_floating_ip_pool_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('routing_instance_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'routing-instance'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_network_routing_instance_xml = ''
- meta = Metadata('virtual-network-routing-instance' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_network_routing_instance_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('route_table_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'route-table'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- virtual_network_route_table_xml = ''
- meta = Metadata('virtual-network-route-table' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = virtual_network_route_table_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_virtual_network_set
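Some reference links, such as virtual-network-network-ipam above, also carry attribute data (ref['attr']), which is serialized the same way as a complex property and appended to the link's XML payload. A sketch of that branch; AttrType is a hypothetical stand-in for the generated attribute classes (e.g. VnSubnetsType), so the XML shape is illustrative, not the real schema output.

# Sketch of a link meta that carries attribute data; AttrType is made up.
from io import StringIO

class AttrType(object):
    def __init__(self, **kwargs):
        self.kwargs = kwargs
    def exportChildren(self, buf, level=1, name_='', pretty_print=False):
        for k, v in sorted(self.kwargs.items()):
            buf.write('<%s>%s</%s>' % (k, v, k))

ref = {'to': ['default-domain', 'demo', 'ipam'], 'attr': {'ipam_subnets': '10.0.0.0/24'}}
link_xml = ''
if ref['attr']:
    buf = StringIO()
    AttrType(**ref['attr']).exportChildren(buf, level=1,
                                           name_='virtual-network-network-ipam',
                                           pretty_print=False)
    link_xml += buf.getvalue()
    buf.close()
assert link_xml == '<ipam_subnets>10.0.0.0/24</ipam_subnets>'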
-
- def _ifmap_virtual_network_create(self, obj_ids, obj_dict):
-        if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['virtual-network']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_virtual_network_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_virtual_network_create
-
-
- def _ifmap_virtual_network_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'virtual-network-qos-forwarding-class', u'virtual-network-network-ipam', u'virtual-network-network-policy', u'virtual-network-route-table', u'virtual-machine-interface-virtual-network', u'instance-ip-virtual-network', u'physical-router-virtual-network', u'logical-router-gateway', u'virtual-network-properties', u'virtual-network-network-id', u'route-target-list', u'router-external', u'is-shared', u'external-ipam', u'flood-unknown-unicast', u'id-perms', u'display-name', u'virtual-network-access-control-list', u'virtual-network-floating-ip-pool', u'virtual-network-routing-instance']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
-    #end _ifmap_virtual_network_read_to_meta_index
-
- def _ifmap_virtual_network_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_virtual_network_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['virtual-network-properties', 'virtual-network-network-id', 'route-target-list', 'router-external', 'is-shared', 'external-ipam', 'flood-unknown-unicast', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'virtual-network-qos-forwarding-class': 'qos-forwarding-class',
- 'virtual-network-network-ipam': 'network-ipam',
- 'virtual-network-network-policy': 'network-policy',
- 'virtual-network-route-table': 'route-table'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_virtual_network_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_virtual_network_update
-
- def _ifmap_virtual_network_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_virtual_network_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_virtual_network_delete
-
- def _ifmap_project_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.project_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_project_alloc
-
- def _ifmap_project_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('quota', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['quota']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- QuotaType(**field).exportChildren(buf, level = 1, name_ = 'quota', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'quota', pretty_print = False)
- quota_xml = buf.getvalue()
- buf.close()
- meta = Metadata('quota' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = quota_xml)
-
- if (existing_metas and 'quota' in existing_metas and
- str(existing_metas['quota'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('namespace_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'namespace'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- project_namespace_xml = ''
- ref_data = ref['attr']
- if ref_data:
- buf = cStringIO.StringIO()
- SubnetType(**ref_data).exportChildren(buf, level = 1, name_ = 'project-namespace', pretty_print = False)
- project_namespace_xml = project_namespace_xml + buf.getvalue()
- buf.close()
- meta = Metadata('project-namespace' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = project_namespace_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('security_group_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'security-group'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- project_security_group_xml = ''
- meta = Metadata('project-security-group' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = project_security_group_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_network_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-network'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- project_virtual_network_xml = ''
- meta = Metadata('project-virtual-network' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = project_virtual_network_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('qos_queue_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'qos-queue'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- project_qos_queue_xml = ''
- meta = Metadata('project-qos-queue' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = project_qos_queue_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('qos_forwarding_class_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'qos-forwarding-class'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- project_qos_forwarding_class_xml = ''
- meta = Metadata('project-qos-forwarding-class' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = project_qos_forwarding_class_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('network_ipam_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'network-ipam'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- project_network_ipam_xml = ''
- meta = Metadata('project-network-ipam' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = project_network_ipam_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('network_policy_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'network-policy'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- project_network_policy_xml = ''
- meta = Metadata('project-network-policy' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = project_network_policy_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_machine_interface_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-machine-interface'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- project_virtual_machine_interface_xml = ''
- meta = Metadata('project-virtual-machine-interface' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = project_virtual_machine_interface_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('floating_ip_pool_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'floating-ip-pool'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- project_floating_ip_pool_xml = ''
- meta = Metadata('project-floating-ip-pool' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = project_floating_ip_pool_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('service_instance_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'service-instance'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- project_service_instance_xml = ''
- meta = Metadata('project-service-instance' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = project_service_instance_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('route_table_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'route-table'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- project_route_table_xml = ''
- meta = Metadata('project-route-table' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = project_route_table_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('interface_route_table_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'interface-route-table'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- project_interface_route_table_xml = ''
- meta = Metadata('project-interface-route-table' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = project_interface_route_table_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('logical_router_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'logical-router'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- project_logical_router_xml = ''
- meta = Metadata('project-logical-router' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = project_logical_router_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('loadbalancer_pool_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'loadbalancer-pool'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- project_loadbalancer_pool_xml = ''
- meta = Metadata('project-loadbalancer-pool' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = project_loadbalancer_pool_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('loadbalancer_healthmonitor_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'loadbalancer-healthmonitor'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- project_loadbalancer_healthmonitor_xml = ''
- meta = Metadata('project-loadbalancer-healthmonitor' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = project_loadbalancer_healthmonitor_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_ip_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-ip'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- project_virtual_ip_xml = ''
- meta = Metadata('project-virtual-ip' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = project_virtual_ip_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_project_set
-
- def _ifmap_project_create(self, obj_ids, obj_dict):
-        if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['project']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_project_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_project_create
-
-
- def _ifmap_project_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'project-namespace', u'project-floating-ip-pool', u'floating-ip-project', u'quota', u'id-perms', u'display-name', u'project-security-group', u'project-virtual-network', u'project-qos-queue', u'project-qos-forwarding-class', u'project-network-ipam', u'project-network-policy', u'project-virtual-machine-interface', u'project-service-instance', u'project-route-table', u'project-interface-route-table', u'project-logical-router', u'project-loadbalancer-pool', u'project-loadbalancer-healthmonitor', u'project-virtual-ip']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
- #end _ifmap_project_read_to_meta_index
-
- def _ifmap_project_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_project_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['quota', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'project-namespace': 'namespace',
- 'project-floating-ip-pool': 'floating-ip-pool'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_project_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_project_update
-
- def _ifmap_project_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_project_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_project_delete
-
- def _ifmap_qos_forwarding_class_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.qos_forwarding_class_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_qos_forwarding_class_alloc
-
- def _ifmap_qos_forwarding_class_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('dscp', None)
- if field is not None:
- norm_str = escape(str(obj_dict['dscp']))
- meta = Metadata('dscp', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'dscp' in existing_metas and
- str(existing_metas['dscp'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('trusted', None)
- if field is not None:
- norm_str = escape(str(obj_dict['trusted']))
- meta = Metadata('trusted', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'trusted' in existing_metas and
- str(existing_metas['trusted'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('qos_queue_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'qos-queue'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- qos_forwarding_class_qos_queue_xml = ''
- meta = Metadata('qos-forwarding-class-qos-queue' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = qos_forwarding_class_qos_queue_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_qos_forwarding_class_set
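Scalar properties such as dscp, trusted and display-name above are simply str()-converted and XML-escaped before being stored as the Metadata value (the surrounding module presumably imports escape from xml.sax.saxutils). A one-line check of that escaping:

from xml.sax.saxutils import escape

# '&', '<' and '>' are the characters escape() handles by default
assert escape(str('AB&Co <dev>')) == 'AB&amp;Co &lt;dev&gt;'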
-
- def _ifmap_qos_forwarding_class_create(self, obj_ids, obj_dict):
-        if 'parent_type' not in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['qos-forwarding-class']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_qos_forwarding_class_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_qos_forwarding_class_create
-
-
- def _ifmap_qos_forwarding_class_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'qos-forwarding-class-qos-queue', u'virtual-network-qos-forwarding-class', u'virtual-machine-interface-qos-forwarding-class', u'dscp', u'trusted', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
-    #end _ifmap_qos_forwarding_class_read_to_meta_index
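A minimal sketch (hypothetical values) of what the *_read_to_meta_index helpers return: property metadata entries carry only a 'meta' key, while reference metadata entries also carry the peer IF-MAP identifier under 'id':

# Hypothetical return value of a *_read_to_meta_index call; the identifiers and
# XML snippets are made up for illustration.
example_metas = {
    'id-perms': [{'meta': '<id-perms>...</id-perms>'}],           # property meta
    'qos-forwarding-class-qos-queue': [
        {'id': 'contrail:qos-queue:default-domain:proj1:queue1',  # ref meta
         'meta': '<qos-forwarding-class-qos-queue/>'}],
}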
-
- def _ifmap_qos_forwarding_class_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_qos_forwarding_class_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['dscp', 'trusted', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'qos-forwarding-class-qos-queue': 'qos-queue'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_qos_forwarding_class_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_qos_forwarding_class_update
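A minimal, self-contained sketch (made-up identifiers) of the stale-reference bookkeeping used by the *_update methods: links present in IF-MAP but absent from the incoming object dict end up on the delete list.

# Hypothetical illustration of the old_set/new_set difference computed above.
old_set = set(['contrail:qos-queue:default-domain:proj1:q1',
               'contrail:qos-queue:default-domain:proj1:q2'])
new_set = set(['contrail:qos-queue:default-domain:proj1:q1'])
stale_refs = old_set - new_set     # only q2 remains
delete_list = [(imid, 'contrail:qos-forwarding-class-qos-queue')
               for imid in stale_refs]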
-
- def _ifmap_qos_forwarding_class_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_qos_forwarding_class_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_qos_forwarding_class_delete
-
- def _ifmap_database_node_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.database_node_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_database_node_alloc
-
- def _ifmap_database_node_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('database_node_ip_address', None)
- if field is not None:
- norm_str = escape(str(obj_dict['database_node_ip_address']))
- meta = Metadata('database-node-ip-address', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'database-node-ip-address' in existing_metas and
- str(existing_metas['database-node-ip-address'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_database_node_set
-
- def _ifmap_database_node_create(self, obj_ids, obj_dict):
- if not 'parent_type' in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['database-node']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_database_node_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_database_node_create
-
-
- def _ifmap_database_node_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'database-node-ip-address', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
-    #end _ifmap_database_node_read_to_meta_index
-
- def _ifmap_database_node_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_database_node_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['database-node-ip-address', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_database_node_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_database_node_update
-
- def _ifmap_database_node_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_database_node_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_database_node_delete
-
- def _ifmap_routing_instance_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.routing_instance_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_routing_instance_alloc
-
- def _ifmap_routing_instance_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('service_chain_information', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['service_chain_information']
- buf = cStringIO.StringIO()
-            # The field may arrive as a dict or as an already-constructed
-            # xsd object; construction differs between the two cases.
- if isinstance(field, dict):
- ServiceChainInfo(**field).exportChildren(buf, level = 1, name_ = 'service-chain-information', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'service-chain-information', pretty_print = False)
- service_chain_information_xml = buf.getvalue()
- buf.close()
- meta = Metadata('service-chain-information' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = service_chain_information_xml)
-
- if (existing_metas and 'service-chain-information' in existing_metas and
- str(existing_metas['service-chain-information'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('routing_instance_is_default', None)
- if field is not None:
- norm_str = escape(str(obj_dict['routing_instance_is_default']))
- meta = Metadata('routing-instance-is-default', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'routing-instance-is-default' in existing_metas and
- str(existing_metas['routing-instance-is-default'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('static_route_entries', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['static_route_entries']
- buf = cStringIO.StringIO()
-            # The field may arrive as a dict or as an already-constructed
-            # xsd object; construction differs between the two cases.
- if isinstance(field, dict):
- StaticRouteEntriesType(**field).exportChildren(buf, level = 1, name_ = 'static-route-entries', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'static-route-entries', pretty_print = False)
- static_route_entries_xml = buf.getvalue()
- buf.close()
- meta = Metadata('static-route-entries' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = static_route_entries_xml)
-
- if (existing_metas and 'static-route-entries' in existing_metas and
- str(existing_metas['static-route-entries'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('default_ce_protocol', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['default_ce_protocol']
- buf = cStringIO.StringIO()
-            # The field may arrive as a dict or as an already-constructed
-            # xsd object; construction differs between the two cases.
- if isinstance(field, dict):
- DefaultProtocolType(**field).exportChildren(buf, level = 1, name_ = 'default-ce-protocol', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'default-ce-protocol', pretty_print = False)
- default_ce_protocol_xml = buf.getvalue()
- buf.close()
- meta = Metadata('default-ce-protocol' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = default_ce_protocol_xml)
-
- if (existing_metas and 'default-ce-protocol' in existing_metas and
- str(existing_metas['default-ce-protocol'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('bgp_router_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'bgp-router'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- instance_bgp_router_xml = ''
- meta = Metadata('instance-bgp-router' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = instance_bgp_router_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('routing_instance_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'routing-instance'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- connection_xml = ''
- ref_data = ref['attr']
- if ref_data:
- buf = cStringIO.StringIO()
- ConnectionType(**ref_data).exportChildren(buf, level = 1, name_ = 'connection', pretty_print = False)
- connection_xml = connection_xml + buf.getvalue()
- buf.close()
- meta = Metadata('connection' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = connection_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('route_target_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'route-target'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- instance_target_xml = ''
- ref_data = ref['attr']
- if ref_data:
- buf = cStringIO.StringIO()
- InstanceTargetType(**ref_data).exportChildren(buf, level = 1, name_ = 'instance-target', pretty_print = False)
- instance_target_xml = instance_target_xml + buf.getvalue()
- buf.close()
- meta = Metadata('instance-target' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = instance_target_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_routing_instance_set
-
- def _ifmap_routing_instance_create(self, obj_ids, obj_dict):
- if not 'parent_type' in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['routing-instance']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_routing_instance_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_routing_instance_create
-
-
- def _ifmap_routing_instance_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
-        all_metas = [u'connection', u'instance-target', u'virtual-machine-interface-routing-instance', u'service-chain-information', u'routing-instance-is-default', u'static-route-entries', u'default-ce-protocol', u'id-perms', u'display-name', u'instance-bgp-router']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
-    #end _ifmap_routing_instance_read_to_meta_index
-
- def _ifmap_routing_instance_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_routing_instance_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['service-chain-information', 'routing-instance-is-default', 'static-route-entries', 'default-ce-protocol', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'connection': 'routing-instance',
- 'instance-target': 'route-target'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_routing_instance_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_routing_instance_update
-
- def _ifmap_routing_instance_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_routing_instance_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_routing_instance_delete
-
- def _ifmap_network_ipam_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.network_ipam_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_network_ipam_alloc
-
- def _ifmap_network_ipam_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('network_ipam_mgmt', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['network_ipam_mgmt']
- buf = cStringIO.StringIO()
-            # The field may arrive as a dict or as an already-constructed
-            # xsd object; construction differs between the two cases.
- if isinstance(field, dict):
- IpamType(**field).exportChildren(buf, level = 1, name_ = 'network-ipam-mgmt', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'network-ipam-mgmt', pretty_print = False)
- network_ipam_mgmt_xml = buf.getvalue()
- buf.close()
- meta = Metadata('network-ipam-mgmt' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = network_ipam_mgmt_xml)
-
- if (existing_metas and 'network-ipam-mgmt' in existing_metas and
- str(existing_metas['network-ipam-mgmt'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_DNS_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-DNS'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- network_ipam_virtual_DNS_xml = ''
- meta = Metadata('network-ipam-virtual-DNS' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = network_ipam_virtual_DNS_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_network_ipam_set
-
- def _ifmap_network_ipam_create(self, obj_ids, obj_dict):
- if not 'parent_type' in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['network-ipam']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_network_ipam_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_network_ipam_create
-
-
- def _ifmap_network_ipam_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'network-ipam-virtual-DNS', u'virtual-network-network-ipam', u'network-ipam-mgmt', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
-    #end _ifmap_network_ipam_read_to_meta_index
-
- def _ifmap_network_ipam_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_network_ipam_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['network-ipam-mgmt', 'id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'network-ipam-virtual-DNS': 'virtual-DNS'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_network_ipam_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_network_ipam_update
-
- def _ifmap_network_ipam_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_network_ipam_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_network_ipam_delete
-
- def _ifmap_logical_router_alloc(self, parent_type, fq_name):
- imid = self._imid_handler
- (my_imid, parent_imid) = \
- imid.logical_router_alloc_ifmap_id(parent_type, fq_name)
- if my_imid is None or parent_imid is None:
- return (False, (my_imid, parent_imid))
- return (True, (my_imid, parent_imid))
- #end _ifmap_logical_router_alloc
-
- def _ifmap_logical_router_set(self, my_imid, existing_metas, obj_dict):
- # Properties Meta
- update = {}
- field = obj_dict.get('id_perms', None)
- if field is not None:
- # construct object of xsd-type and get its xml repr
- field = obj_dict['id_perms']
- buf = cStringIO.StringIO()
- # perms might be inserted at server as obj.
- # obj construction diff from dict construction.
- if isinstance(field, dict):
- IdPermsType(**field).exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- else: # object
- field.exportChildren(buf, level = 1, name_ = 'id-perms', pretty_print = False)
- id_perms_xml = buf.getvalue()
- buf.close()
- meta = Metadata('id-perms' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = id_perms_xml)
-
- if (existing_metas and 'id-perms' in existing_metas and
- str(existing_metas['id-perms'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- field = obj_dict.get('display_name', None)
- if field is not None:
- norm_str = escape(str(obj_dict['display_name']))
- meta = Metadata('display-name', norm_str,
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
-
- if (existing_metas and 'display-name' in existing_metas and
- str(existing_metas['display-name'][0]['meta']) == str(meta)):
- # no change
- pass
- else:
- self._update_id_self_meta(update, meta)
-
- # Ref Link Metas
- imid = self._imid_handler
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_machine_interface_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-machine-interface'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- logical_router_interface_xml = ''
- meta = Metadata('logical-router-interface' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = logical_router_interface_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('route_target_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'route-target'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- logical_router_target_xml = ''
- meta = Metadata('logical-router-target' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = logical_router_target_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('virtual_network_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'virtual-network'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- logical_router_gateway_xml = ''
- meta = Metadata('logical-router-gateway' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = logical_router_gateway_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
- # construct object of xsd-type and get its xml repr
- refs = obj_dict.get('service_instance_refs', None)
- if refs:
- for ref in refs:
- ref_fq_name = ref['to']
- obj_type = 'service-instance'
- to_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, ref_fq_name)
- logical_router_service_instance_xml = ''
- meta = Metadata('logical-router-service-instance' , '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail',
- elements = logical_router_service_instance_xml)
- self._update_id_pair_meta(update, to_imid, meta)
-
-
- self._publish_update(my_imid, update)
- return (True, '')
- #end _ifmap_logical_router_set
-
- def _ifmap_logical_router_create(self, obj_ids, obj_dict):
- if not 'parent_type' in obj_dict:
- # parent is config-root
- parent_type = 'config-root'
- parent_imid = 'contrail:config-root:root'
- else:
- parent_type = obj_dict['parent_type']
- parent_imid = obj_ids.get('parent_imid', None)
-
- # Parent Link Meta
- update = {}
- parent_link_meta = self._parent_metas[parent_type]['logical-router']
- meta = Metadata(parent_link_meta, '',
- {'ifmap-cardinality':'singleValue'}, ns_prefix = 'contrail')
- self._update_id_pair_meta(update, obj_ids['imid'], meta)
- self._publish_update(parent_imid, update)
-
- (ok, result) = self._ifmap_logical_router_set(obj_ids['imid'], None, obj_dict)
- return (ok, result)
- #end _ifmap_logical_router_create
-
-
- def _ifmap_logical_router_read_to_meta_index(self, ifmap_id, field_names = None):
- # field_names = None means all fields will be read
- imid = self._imid_handler
- start_id = str(Identity(name = ifmap_id, type = 'other',
- other_type = 'extended'))
- # if id-perms missing, identity doesn't exist
-
- all_metas = [u'logical-router-interface', u'logical-router-target', u'logical-router-gateway', u'logical-router-service-instance', u'id-perms', u'display-name']
- if not field_names:
- metas_to_read = all_metas
- else: # read only requested fields
- metas_to_read = set(all_metas) & set(field_names.keys())
-
- # metas is a dict where key is meta-name and val is list of dict
- # of form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
- metas = {}
- for meta_name in metas_to_read:
- if meta_name in self._id_to_metas[ifmap_id]:
- metas[meta_name] = self._id_to_metas[ifmap_id][meta_name]
- return metas
-    #end _ifmap_logical_router_read_to_meta_index
-
- def _ifmap_logical_router_update(self, ifmap_id, new_obj_dict):
- # read in refs from ifmap to determine which ones become inactive after update
- existing_metas = self._ifmap_logical_router_read_to_meta_index(ifmap_id)
-
- # remove properties that are no longer active
- props = ['id-perms', 'display-name']
- for prop in props:
- prop_m = prop.replace('-', '_')
- if prop in existing_metas and prop_m not in new_obj_dict:
- self._delete_id_self_meta(ifmap_id, 'contrail:'+prop)
- # remove refs that are no longer active
- delete_list = []
- refs = {'logical-router-interface': 'virtual-machine-interface',
- 'logical-router-target': 'route-target',
- 'logical-router-gateway': 'virtual-network',
- 'logical-router-service-instance': 'service-instance'}
- for meta, to_name in refs.items():
- old_set = set([m['id'] for m in existing_metas.get(meta, [])])
- new_set = set()
- to_name_m = to_name.replace('-', '_')
- for ref in new_obj_dict.get(to_name_m+'_refs', []):
- to_imid = self.fq_name_to_ifmap_id(to_name, ref['to'])
- new_set.add(to_imid)
-
- for inact_ref in old_set - new_set:
- delete_list.append((inact_ref, 'contrail:'+meta))
-
- if delete_list:
- self._delete_id_pair_meta_list(ifmap_id, delete_list)
-
- (ok, result) = self._ifmap_logical_router_set(ifmap_id, existing_metas, new_obj_dict)
- return (ok, result)
- #end _ifmap_logical_router_update
-
- def _ifmap_logical_router_delete(self, obj_ids):
- ifmap_id = obj_ids['imid']
- parent_imid = obj_ids.get('parent_imid', None)
- existing_metas = self._ifmap_logical_router_read_to_meta_index(ifmap_id)
- meta_list = []
- for meta_name, meta_infos in existing_metas.items():
- for meta_info in meta_infos:
- ref_imid = meta_info.get('id')
- if ref_imid is None:
- continue
- meta_list.append((ref_imid, 'contrail:'+meta_name))
-
- if parent_imid:
- # Remove link from parent
- meta_list.append((parent_imid, None))
-
- if meta_list:
- self._delete_id_pair_meta_list(ifmap_id, meta_list)
-
- # Remove all property metadata associated with this ident
- self._delete_id_self_meta(ifmap_id, None)
-
- return (True, '')
- #end _ifmap_logical_router_delete
-
-#end class VncIfmapClientGen
-
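The class above generates the same five-method lifecycle (_alloc, _set, _create, _update, _delete) for every resource type, so a caller can dispatch on the hyphenated type name. A minimal sketch of such a dispatcher, assuming 'client' mixes in VncIfmapClientGen:

# Hypothetical dispatch helper (not part of the deleted module); the method-name
# convention is taken from the generated methods above.
def ifmap_create(client, obj_type, obj_ids, obj_dict):
    # e.g. obj_type 'routing-instance' -> client._ifmap_routing_instance_create
    method_name = '_ifmap_%s_create' % obj_type.replace('-', '_')
    return getattr(client, method_name)(obj_ids, obj_dict)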
-class ImidGen(object):
- def domain_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:domain:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end domain_alloc_ifmap_id
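A usage sketch for the alloc helpers in this class (illustrative fq_name values; the remaining methods below repeat the same pattern with a different type prefix). When fq_name has no parent component, the parent identity falls back to config-root; a nested fq_name without a parent_type yields (None, None).

# Hypothetical usage of ImidGen.domain_alloc_ifmap_id; values are illustrative.
gen = ImidGen()
gen.domain_alloc_ifmap_id(None, ['default-domain'])
# -> ('contrail:domain:default-domain', 'contrail:config-root:root')
gen.domain_alloc_ifmap_id(None, ['acme', 'eng'])
# -> (None, None): fq_name has a parent component but no parent_type was given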
-
- def global_vrouter_config_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:global-vrouter-config:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end global_vrouter_config_alloc_ifmap_id
-
- def instance_ip_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:instance-ip:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end instance_ip_alloc_ifmap_id
-
- def network_policy_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:network-policy:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end network_policy_alloc_ifmap_id
-
- def loadbalancer_pool_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:loadbalancer-pool:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end loadbalancer_pool_alloc_ifmap_id
-
- def virtual_DNS_record_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:virtual-DNS-record:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end virtual_DNS_record_alloc_ifmap_id
-
- def route_target_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:route-target:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end route_target_alloc_ifmap_id
-
- def floating_ip_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:floating-ip:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end floating_ip_alloc_ifmap_id
-
- def floating_ip_pool_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:floating-ip-pool:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end floating_ip_pool_alloc_ifmap_id
-
- def physical_router_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:physical-router:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end physical_router_alloc_ifmap_id
-
- def bgp_router_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:bgp-router:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end bgp_router_alloc_ifmap_id
-
- def virtual_router_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:virtual-router:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end virtual_router_alloc_ifmap_id
-
- def config_root_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:config-root:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end config_root_alloc_ifmap_id
-
- def subnet_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:subnet:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end subnet_alloc_ifmap_id
-
- def global_system_config_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:global-system-config:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end global_system_config_alloc_ifmap_id
-
- def service_appliance_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:service-appliance:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end service_appliance_alloc_ifmap_id
-
- def service_instance_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:service-instance:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end service_instance_alloc_ifmap_id
-
- def namespace_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:namespace:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end namespace_alloc_ifmap_id
-
- def logical_interface_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:logical-interface:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end logical_interface_alloc_ifmap_id
-
- def route_table_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:route-table:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end route_table_alloc_ifmap_id
-
- def physical_interface_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:physical-interface:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end physical_interface_alloc_ifmap_id
-
- def access_control_list_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:access-control-list:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end access_control_list_alloc_ifmap_id
-
- def analytics_node_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:analytics-node:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end analytics_node_alloc_ifmap_id
-
- def virtual_DNS_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:virtual-DNS:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end virtual_DNS_alloc_ifmap_id
-
- def customer_attachment_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:customer-attachment:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end customer_attachment_alloc_ifmap_id
-
- def service_appliance_set_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:service-appliance-set:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end service_appliance_set_alloc_ifmap_id
-
- def config_node_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:config-node:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end config_node_alloc_ifmap_id
-
- def qos_queue_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:qos-queue:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end qos_queue_alloc_ifmap_id
-
- def virtual_machine_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:virtual-machine:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end virtual_machine_alloc_ifmap_id
-
- def interface_route_table_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:interface-route-table:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end interface_route_table_alloc_ifmap_id
-
- def service_template_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:service-template:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end service_template_alloc_ifmap_id
-
- def virtual_ip_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:virtual-ip:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end virtual_ip_alloc_ifmap_id
-
- def loadbalancer_member_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:loadbalancer-member:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end loadbalancer_member_alloc_ifmap_id
-
- def security_group_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:security-group:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end security_group_alloc_ifmap_id
-
- def provider_attachment_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:provider-attachment:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end provider_attachment_alloc_ifmap_id
-
- def virtual_machine_interface_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:virtual-machine-interface:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end virtual_machine_interface_alloc_ifmap_id
-
- def loadbalancer_healthmonitor_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:loadbalancer-healthmonitor:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end loadbalancer_healthmonitor_alloc_ifmap_id
-
- def virtual_network_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:virtual-network:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end virtual_network_alloc_ifmap_id
-
- def project_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:project:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end project_alloc_ifmap_id
-
- def qos_forwarding_class_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:qos-forwarding-class:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end qos_forwarding_class_alloc_ifmap_id
-
- def database_node_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:database-node:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end database_node_alloc_ifmap_id
-
- def routing_instance_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:routing-instance:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end routing_instance_alloc_ifmap_id
-
- def network_ipam_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:network-ipam:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end network_ipam_alloc_ifmap_id
-
- def logical_router_alloc_ifmap_id(self, parent_type, fq_name):
- my_fqn = ':'.join(fq_name)
- parent_fqn = ':'.join(fq_name[:-1])
-
- my_imid = 'contrail:logical-router:' + my_fqn
- if parent_fqn:
- if parent_type is None:
- return (None, None)
- parent_imid = 'contrail:' + parent_type + ':' + parent_fqn
- else: # parent is config-root
- parent_imid = 'contrail:config-root:root'
-
- # Normalize/escape special chars
- my_imid = escape(my_imid)
- parent_imid = escape(parent_imid)
-
- return (my_imid, parent_imid)
- #end logical_router_alloc_ifmap_id
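-
- # Illustrative sketch (not part of the generated code): every *_alloc_ifmap_id
- # helper above follows the same pattern, so one worked example covers them all.
- # "gen" stands for an instance of the generated class defined earlier in this
- # file; the fq_name values are hypothetical, and the results assume escape()
- # leaves these plain strings unchanged.
- #
- #   >>> gen.virtual_network_alloc_ifmap_id('project',
- #   ...     ['default-domain', 'demo-project', 'vn1'])
- #   ('contrail:virtual-network:default-domain:demo-project:vn1',
- #    'contrail:project:default-domain:demo-project')
- #
- # With a single-element fq_name, parent_fqn is empty and the parent IMID is
- # 'contrail:config-root:root'; a non-empty parent_fqn with parent_type None
- # yields (None, None).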
-
-
-link_name_to_xsd_type = {
- "project-namespace":"SubnetType",
- "connection":"ConnectionType",
- "bgp-peering":"BgpPeeringAttributes",
- "virtual-machine-interface-routing-instance":"PolicyBasedForwardingRuleType",
- "virtual-network-network-policy":"VirtualNetworkPolicyType",
- "instance-target":"InstanceTargetType",
- "virtual-network-network-ipam":"VnSubnetsType"
-}
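-
-# Illustrative sketch (not in the original module): link_name_to_xsd_type maps a
-# reference (link) name to the XSD type that carries its attributes, e.g.
-#
-#   >>> link_name_to_xsd_type['virtual-network-network-ipam']
-#   'VnSubnetsType'
-#   >>> link_name_to_xsd_type.get('no-such-link') is None
-#   True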
-
diff --git a/Testcases/vnc_api/gen/vnc_ifmap_client_gen.pyc b/Testcases/vnc_api/gen/vnc_ifmap_client_gen.pyc
deleted file mode 100644
index b22e080..0000000
--- a/Testcases/vnc_api/gen/vnc_ifmap_client_gen.pyc
+++ /dev/null
Binary files differ
diff --git a/Testcases/vnc_api/vnc_api.py b/Testcases/vnc_api/vnc_api.py
deleted file mode 100644
index 33ab750..0000000
--- a/Testcases/vnc_api/vnc_api.py
+++ /dev/null
@@ -1,682 +0,0 @@
-#
-# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
-#
-import logging
-import requests
-from requests.exceptions import ConnectionError
-
-import ConfigParser
-import pprint
-import json
-import sys
-import time
-import platform
-import __main__ as main
-
-import gen.resource_common
-from gen.resource_xsd import *
-from gen.resource_client import *
-from gen.vnc_api_client_gen import VncApiClientGen
-
-from cfgm_common import rest, utils
-from cfgm_common.exceptions import *
-
-from pprint import pformat
-
-def str_to_class(class_name):
- try:
- return reduce(getattr, class_name.split("."), sys.modules[__name__])
- except Exception as e:
- logger = logging.getLogger(__name__)
- logger.warn("Exception: %s", str(e))
- return None
-#end str_to_class
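-
-# Illustrative sketch (not in the original file): str_to_class resolves a dotted
-# class name against this module's namespace (populated by the wildcard imports
-# above) and returns None, logging a warning, when the name cannot be resolved.
-#
-#   >>> str_to_class('VirtualNetwork')        # resource class, if exported above
-#   >>> str_to_class('NoSuchClass') is None   # unknown name -> warning + None
-#   True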
-
-
-def _read_cfg(cfg_parser, section, option, default):
- try:
- val = cfg_parser.get(section, option)
- except (AttributeError,
- ConfigParser.NoOptionError,
- ConfigParser.NoSectionError):
- val = default
-
- return val
-#end _read_cfg
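-
-# Illustrative sketch (not in the original file): _read_cfg falls back to the
-# supplied default whenever the section or option is missing, so callers never
-# have to guard against ConfigParser exceptions. The section/option names below
-# are the ones used later in this module; the .ini contents are hypothetical.
-#
-#   >>> cfg = ConfigParser.ConfigParser()
-#   >>> cfg.read('/etc/contrail/vnc_api_lib.ini')       # may be empty or absent
-#   >>> _read_cfg(cfg, 'global', 'WEB_PORT', 8082)      # configured port, else 8082
-#   >>> _read_cfg(cfg, 'auth', 'AUTHN_TYPE', 'keystone')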
-
-class ActionUriDict(dict):
- """Action uri dictionary with operator([]) overloading to parse home page
- and populate the action_uri, if not populated already.
- """
- def __init__(self, vnc_api, *args, **kwargs):
- dict.__init__(self, args, **kwargs)
- self.vnc_api = vnc_api
-
- def __getitem__(self, key):
- try:
- return dict.__getitem__(self, key)
- except KeyError:
- homepage = self.vnc_api._request(rest.OP_GET, self.vnc_api._base_url,
- retry_on_error=False)
- self.vnc_api._cfg_root_url = self.vnc_api._parse_homepage(homepage)
- return dict.__getitem__(self, key)
-
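-# Illustrative sketch (not in the original file): the first lookup of an action
-# name that is not yet cached triggers a GET of the api-server home page, which
-# (via _parse_homepage) fills in the action URIs before the lookup is retried.
-# The VncApi() call is hypothetical; 'ref-update' is an action used below.
-#
-#   >>> api = VncApi()
-#   >>> api._action_uri['ref-update']   # miss -> home page fetched -> URI returned
-#   >>> api._action_uri['ref-update']   # second access is a plain dict hit
-#
-# A name the server never advertises still raises KeyError after the refetch.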
-
-class VncApi(VncApiClientGen):
- _DEFAULT_WEB_SERVER = "127.0.0.1"
-
- hostname = platform.node()
- _DEFAULT_HEADERS = {
- 'Content-type': 'application/json; charset="UTF-8"',
- 'X-Contrail-Useragent': '%s:%s'
- %(hostname, getattr(main, '__file__', '')),
- }
-
- _AUTHN_SUPPORTED_TYPES = ["keystone"]
- _DEFAULT_AUTHN_TYPE = "keystone"
- _DEFAULT_AUTHN_HEADERS = _DEFAULT_HEADERS
- _DEFAULT_AUTHN_PROTOCOL = "http"
- _DEFAULT_AUTHN_SERVER = _DEFAULT_WEB_SERVER
- _DEFAULT_AUTHN_PORT = 35357
- _DEFAULT_AUTHN_URL = "/v2.0/tokens"
- _DEFAULT_AUTHN_USER = ""
- _DEFAULT_AUTHN_PASSWORD = ""
- _DEFAULT_AUTHN_TENANT = VncApiClientGen._tenant_name
-
- # Connection to api-server through Quantum
- _DEFAULT_WEB_PORT = 8082
- _DEFAULT_BASE_URL = "/"
-
- # The number of items beyond which a POST /list-bulk-collection is issued
- # instead of a GET /<collection>
- POST_FOR_LIST_THRESHOLD = 25
-
- def __init__(self, username=None, password=None, tenant_name=None,
- api_server_host='127.0.0.1', api_server_port='8082',
- api_server_url=None, conf_file=None, user_info=None,
- auth_token=None, auth_host=None, auth_port=None,
- auth_protocol=None, auth_url=None, auth_type=None,
- wait_for_connect=False):
- # TODO allow for username/password to be present in creds file
-
- super(VncApi, self).__init__(self._obj_serializer_diff)
-
- cfg_parser = ConfigParser.ConfigParser()
- try:
- cfg_parser.read(conf_file or
- "/etc/contrail/vnc_api_lib.ini")
- except Exception as e:
- logger = logging.getLogger(__name__)
- logger.warn("Exception: %s", str(e))
-
- # keystone
- self._authn_type = auth_type or \
- _read_cfg(cfg_parser, 'auth', 'AUTHN_TYPE',
- self._DEFAULT_AUTHN_TYPE)
-
- if self._authn_type == 'keystone':
- self._authn_protocol = auth_protocol or \
- _read_cfg(cfg_parser, 'auth', 'AUTHN_PROTOCOL',
- self._DEFAULT_AUTHN_PROTOCOL)
- self._authn_server = auth_host or \
- _read_cfg(cfg_parser, 'auth', 'AUTHN_SERVER',
- self._DEFAULT_AUTHN_SERVER)
- self._authn_port = auth_port or \
- _read_cfg(cfg_parser, 'auth', 'AUTHN_PORT',
- self._DEFAULT_AUTHN_PORT)
- self._authn_url = auth_url or \
- _read_cfg(cfg_parser, 'auth', 'AUTHN_URL',
- self._DEFAULT_AUTHN_URL)
- self._username = username or \
- _read_cfg(cfg_parser, 'auth', 'AUTHN_USER',
- self._DEFAULT_AUTHN_USER)
- self._password = password or \
- _read_cfg(cfg_parser, 'auth', 'AUTHN_PASSWORD',
- self._DEFAULT_AUTHN_PASSWORD)
- self._tenant_name = tenant_name or \
- _read_cfg(cfg_parser, 'auth', 'AUTHN_TENANT',
- self._DEFAULT_AUTHN_TENANT)
- self._authn_body = \
- '{"auth":{"passwordCredentials":{' + \
- '"username": "%s",' % (self._username) + \
- ' "password": "%s"},' % (self._password) + \
- ' "tenantName":"%s"}}' % (self._tenant_name)
- self._user_info = user_info
-
- if not api_server_host:
- self._web_host = _read_cfg(cfg_parser, 'global', 'WEB_SERVER',
- self._DEFAULT_WEB_SERVER)
- else:
- self._web_host = api_server_host
-
- if not api_server_port:
- self._web_port = _read_cfg(cfg_parser, 'global', 'WEB_PORT',
- self._DEFAULT_WEB_PORT)
- else:
- self._web_port = api_server_port
-
- # Where client's view of world begins
- if not api_server_url:
- self._base_url = _read_cfg(cfg_parser, 'global', 'BASE_URL',
- self._DEFAULT_BASE_URL)
- else:
- self._base_url = api_server_url
-
- # Where server says its root is when _base_url is fetched
- self._srv_root_url = None
-
- # Type-independent actions offered by server
- self._action_uri = ActionUriDict(self)
-
- self._headers = self._DEFAULT_HEADERS.copy()
- self._headers[rest.hdr_client_tenant()] = self._tenant_name
-
- self._auth_token_input = False
- self._auth_token = None
-
- if auth_token:
- self._auth_token = auth_token
- self._auth_token_input = True
- self._headers['X-AUTH-TOKEN'] = self._auth_token
-
- # user information for quantum
- if self._user_info:
- if 'user_id' in self._user_info:
- self._headers['X-API-USER-ID'] = self._user_info['user_id']
- if 'user' in self._user_info:
- self._headers['X-API-USER'] = self._user_info['user']
- if 'role' in self._user_info:
- self._headers['X-API-ROLE'] = self._user_info['role']
-
- #self._http = HTTPClient(self._web_host, self._web_port,
- # network_timeout = 300)
-
- self._create_api_server_session()
-
- retry_count = 6
- while retry_count:
- try:
- homepage = self._request(rest.OP_GET, self._base_url,
- retry_on_error=False)
- self._cfg_root_url = self._parse_homepage(homepage)
- except ServiceUnavailableError as e:
- logger = logging.getLogger(__name__)
- logger.warn("Exception: %s", str(e))
- if wait_for_connect:
- # Retry connect infinitely when http retcode 503
- continue
- elif retry_count:
- # Retry connect up to retry_count (initialised to 6) times when http retcode 503
- retry_count -= 1
- time.sleep(1)
- else:
- # connected successfully
- break
- #end __init__
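-
- # Illustrative sketch (not part of the original class): typical construction.
- # The literal values are hypothetical; anything not passed explicitly is read
- # from /etc/contrail/vnc_api_lib.ini (or conf_file) and then falls back to the
- # _DEFAULT_* class attributes above.
- #
- #   >>> vnc = VncApi(username='admin', password='secret', tenant_name='admin',
- #   ...              api_server_host='10.0.0.10', api_server_port='8082')
- #
- # Passing auth_token=... instead of credentials skips the keystone round trip
- # and sends the supplied token in the X-AUTH-TOKEN header.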
-
- def _obj_serializer_diff(self, obj):
- if hasattr(obj, 'serialize_to_json'):
- return obj.serialize_to_json(obj.get_pending_updates())
- else:
- return dict((k, v) for k, v in obj.__dict__.iteritems())
- #end _obj_serializer_diff
-
- def _obj_serializer_all(self, obj):
- if hasattr(obj, 'serialize_to_json'):
- return obj.serialize_to_json()
- else:
- return dict((k, v) for k, v in obj.__dict__.iteritems())
- #end _obj_serializer_all
-
- def _create_api_server_session(self):
- self._api_server_session = requests.Session()
-
- adapter = requests.adapters.HTTPAdapter(pool_connections=100,
- pool_maxsize=100)
- self._api_server_session.mount("http://", adapter)
- self._api_server_session.mount("https://", adapter)
- #end _create_api_server_session
-
- # Authenticate with configured service
- def _authenticate(self, response=None, headers=None):
- if self._authn_type is None:
- return headers
- url = "%s://%s:%s%s" % (self._authn_protocol, self._authn_server, self._authn_port,
- self._authn_url)
- try:
- response = requests.post(url, data=self._authn_body,
- headers=self._DEFAULT_AUTHN_HEADERS)
- except Exception as e:
- raise RuntimeError('Unable to connect to keystone for authentication. Verify keystone server details')
-
- if response.status_code == 200:
- # plan is to re-issue original request with new token
- new_headers = headers or {}
- authn_content = json.loads(response.text)
- self._auth_token = authn_content['access']['token']['id']
- new_headers['X-AUTH-TOKEN'] = self._auth_token
- return new_headers
- else:
- raise RuntimeError('Authentication Failure')
- #end _authenticate
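-
- # Illustrative sketch (not part of the original class): shape of the keystone
- # v2.0 exchange performed above. The credential and token values are
- # hypothetical.
- #
- # request body (built in __init__ as self._authn_body):
- #   {"auth":{"passwordCredentials":{"username": "admin", "password": "secret"},
- #    "tenantName":"admin"}}
- # 200 response body (only the field used here is shown):
- #   {"access": {"token": {"id": "a1b2c3..."}}}
- #
- # The token id is placed in the X-AUTH-TOKEN header of the retried request.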
-
- def _http_get(self, uri, headers=None, query_params=None):
- url = "http://%s:%s%s" \
- % (self._web_host, self._web_port, uri)
- response = self._api_server_session.get(url, headers=headers,
- params=query_params)
- #print 'Sending Request URL: ' + pformat(url)
- #print ' Headers: ' + pformat(headers)
- #print ' QParams: ' + pformat(query_params)
- #response = self._api_server_session.get(url, headers = headers,
- # params = query_params)
- #print 'Received Response: ' + pformat(response.text)
- return (response.status_code, response.text)
- #end _http_get
-
- def _http_post(self, uri, body, headers):
- url = "http://%s:%s%s" \
- % (self._web_host, self._web_port, uri)
- response = self._api_server_session.post(url, data=body,
- headers=headers)
- return (response.status_code, response.text)
- #end _http_post
-
- def _http_delete(self, uri, body, headers):
- url = "http://%s:%s%s" \
- % (self._web_host, self._web_port, uri)
- response = self._api_server_session.delete(url, data=body,
- headers=headers)
- return (response.status_code, response.text)
- #end _http_delete
-
- def _http_put(self, uri, body, headers):
- url = "http://%s:%s%s" \
- % (self._web_host, self._web_port, uri)
- response = self._api_server_session.put(url, data=body,
- headers=headers)
- return (response.status_code, response.text)
- #end _http_put
-
- def _parse_homepage(self, json_body):
- py_obj = json.loads(json_body)
-
- srv_root_url = py_obj['href']
- self._srv_root_url = srv_root_url
-
- for link in py_obj['links']:
- # strip base from *_url to get *_uri
- uri = link['link']['href'].replace(srv_root_url, '')
- if link['link']['rel'] == 'collection':
- class_name = "%s" % (utils.CamelCase(link['link']['name']))
- cls = str_to_class(class_name)
- if not cls:
- continue
- cls.create_uri = uri
- elif link['link']['rel'] == 'resource-base':
- class_name = "%s" % (utils.CamelCase(link['link']['name']))
- cls = str_to_class(class_name)
- if not cls:
- continue
- resource_type = link['link']['name']
- cls.resource_uri_base[resource_type] = uri
- elif link['link']['rel'] == 'action':
- act_type = link['link']['name']
- self._action_uri[act_type] = uri
- #end _parse_homepage
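-
- # Illustrative sketch (not part of the original class): minimal form of the
- # home-page document this method expects; hrefs and names are hypothetical and
- # the exact link names are defined by the api-server, not by this client.
- #
- #   {"href": "http://10.0.0.10:8082",
- #    "links": [
- #      {"link": {"rel": "collection", "name": "...", "href": ".../..."}},
- #      {"link": {"rel": "resource-base", "name": "...", "href": ".../..."}},
- #      {"link": {"rel": "action", "name": "ref-update", "href": ".../ref-update"}}]}
- #
- # collection links set <Class>.create_uri, resource-base links fill
- # <Class>.resource_uri_base, and action links populate self._action_uri.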
-
- def _find_url(self, json_body, resource_name):
- rname = unicode(resource_name)
- py_obj = json.loads(json_body)
- pprint.pprint(py_obj)
- for link in py_obj['links']:
- if link['link']['name'] == rname:
- return link['link']['href']
-
- return None
- #end _find_url
-
- def _read_args_to_id(self, obj_type, fq_name=None, fq_name_str=None,
- id=None, ifmap_id=None):
- arg_count = ((fq_name is not None) + (fq_name_str is not None) +
- (id is not None) + (ifmap_id is not None))
-
- if (arg_count == 0):
- return (False, "at least one of the arguments has to be provided")
- elif (arg_count > 1):
- return (False, "only one of the arguments should be provided")
-
- if id:
- return (True, id)
- if fq_name:
- return (True, self.fq_name_to_id(obj_type, fq_name))
- if fq_name_str:
- return (True, self.fq_name_to_id(obj_type, fq_name_str.split(':')))
- if ifmap_id:
- return (True, self.ifmap_to_id(ifmap_id))
- #end _read_args_to_id
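-
- # Illustrative sketch (not part of the original class): exactly one of
- # fq_name / fq_name_str / id / ifmap_id must be supplied. The uuid is
- # hypothetical.
- #
- #   >>> api._read_args_to_id('virtual-network')
- #   (False, "at least one of the arguments has to be provided")
- #   >>> api._read_args_to_id('virtual-network', fq_name=['d', 'p', 'vn'], id='x')
- #   (False, "only one of the arguments should be provided")
- #   >>> api._read_args_to_id('virtual-network', id='uuid-1234')
- #   (True, 'uuid-1234')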
-
- def _request_server(self, op, url, data=None, retry_on_error=True,
- retry_after_authn=False, retry_count=30):
- if not hasattr(self, '_cfg_root_url'):
- homepage = self._request(rest.OP_GET, self._base_url,
- retry_on_error=False)
- self._cfg_root_url = self._parse_homepage(homepage)
-
- return self._request(op, url, data=data, retry_on_error=retry_on_error,
- retry_after_authn=retry_after_authn,
- retry_count=retry_count)
- #end _request_server
-
- def _request(self, op, url, data=None, retry_on_error=True,
- retry_after_authn=False, retry_count=30):
- retried = 0
- while True:
- try:
- if (op == rest.OP_GET):
- (status, content) = self._http_get(url, headers=self._headers,
- query_params=data)
- elif (op == rest.OP_POST):
- (status, content) = self._http_post(url, body=data,
- headers=self._headers)
- elif (op == rest.OP_DELETE):
- (status, content) = self._http_delete(url, body=data,
- headers=self._headers)
- elif (op == rest.OP_PUT):
- (status, content) = self._http_put(url, body=data,
- headers=self._headers)
- else:
- raise ValueError
- except ConnectionError:
- if not retry_on_error:
- raise
-
- time.sleep(1)
- self._create_api_server_session()
- continue
-
- if status == 200:
- return content
-
- # Exception Response, see if it can be resolved
- if ((status == 401) and (not self._auth_token_input) and (not retry_after_authn)):
- self._headers = self._authenticate(content, self._headers)
- # Recursive call after authentication (max 1 level)
- content = self._request(op, url, data=data, retry_after_authn=True)
-
- return content
- elif status == 404:
- raise NoIdError('Error: oper %s url %s body %s response %s'
- % (op, url, data, content))
- elif status == 403:
- raise PermissionDenied(content)
- elif status == 409:
- raise RefsExistError(content)
- elif status == 504:
- # Request sent to API server, but no response came within 50s
- raise TimeOutError('Gateway Timeout 504')
- elif status in [502, 503]:
- # 502: API server died after accepting request, so retry
- # 503: no API server available even before sending the request
- retried += 1
- if retried >= retry_count:
- raise ServiceUnavailableError('Service Unavailable Timeout %d' % status)
-
- time.sleep(1)
- continue
- elif status == 400:
- raise BadRequest(status, content)
- else: # Unknown Error
- raise HttpError(status, content)
- # end while True
-
- #end _request
-
- def ref_update(self, obj_type, obj_uuid, ref_type, ref_uuid, ref_fq_name, operation, attr=None):
- if ref_type.endswith('_refs'):
- ref_type = ref_type[:-5].replace('_', '-')
- json_body = json.dumps({'type': obj_type, 'uuid': obj_uuid,
- 'ref-type': ref_type, 'ref-uuid': ref_uuid,
- 'ref-fq-name': ref_fq_name,
- 'operation': operation, 'attr': attr},
- default=self._obj_serializer_diff)
- uri = self._action_uri['ref-update']
- try:
- content = self._request_server(rest.OP_POST, uri, data=json_body)
- except HttpError as he:
- if he.status_code == 404:
- return None
- raise he
-
- return json.loads(content)['uuid']
- #end ref_update
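-
- # Illustrative sketch (not part of the original class): attaching a reference
- # between two existing objects. The uuids and fq_name are hypothetical, and
- # 'ADD' is assumed to be an operation value accepted by the api-server (the
- # valid values are not defined in this file).
- #
- #   >>> api.ref_update('virtual-network', vn_uuid,
- #   ...                'network_policy_refs', policy_uuid,
- #   ...                ['default-domain', 'demo-project', 'policy1'],
- #   ...                'ADD', attr=VirtualNetworkPolicyType())
- #
- # A ref_type ending in '_refs' is normalised to its hyphenated resource name
- # ('network-policy') before the POST to the ref-update action URI.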
-
- def obj_to_id(self, obj):
- return self.fq_name_to_id(obj.get_type(), obj.get_fq_name())
- #end obj_to_id
-
- def fq_name_to_id(self, obj_type, fq_name):
- json_body = json.dumps({'type': obj_type, 'fq_name': fq_name})
- uri = self._action_uri['name-to-id']
- try:
- content = self._request_server(rest.OP_POST, uri, data=json_body)
- except HttpError as he:
- if he.status_code == 404:
- return None
- raise he
-
- return json.loads(content)['uuid']
- #end fq_name_to_id
-
- def id_to_fq_name(self, id):
- json_body = json.dumps({'uuid': id})
- uri = self._action_uri['id-to-name']
- content = self._request_server(rest.OP_POST, uri, data=json_body)
-
- return json.loads(content)['fq_name']
- #end id_to_fq_name
-
- def id_to_fq_name_type(self, id):
- json_body = json.dumps({'uuid': id})
- uri = self._action_uri['id-to-name']
- content = self._request_server(rest.OP_POST, uri, data=json_body)
-
- json_rsp = json.loads(content)
- return (json_rsp['fq_name'], json_rsp['type'])
- #end id_to_fq_name_type
-
- # This is required only to help ifmap subscribers that use REST publish
- def ifmap_to_id(self, ifmap_id):
- json_body = json.dumps({'ifmap_id': ifmap_id})
- uri = self._action_uri['ifmap-to-id']
- try:
- content = self._request_server(rest.OP_POST, uri, data=json_body)
- except HttpError as he:
- if he.status_code == 404:
- return None
- raise he
-
- return json.loads(content)['uuid']
- #end ifmap_to_id
-
- def obj_to_json(self, obj):
- return json.dumps(obj, default=self._obj_serializer_all)
- # end obj_to_json
-
- def obj_to_dict(self, obj):
- return json.loads(self.obj_to_json(obj))
- # end obj_to_dict
-
- def fetch_records(self):
- json_body = json.dumps({'fetch_records': None})
- uri = self._action_uri['fetch-records']
- content = self._request_server(rest.OP_POST, uri, data=json_body)
-
- return json.loads(content)['results']
- #end fetch_records
-
- def restore_config(self, create, resource, json_body):
- class_name = "%s" % (utils.CamelCase(resource))
- cls = str_to_class(class_name)
- if not cls:
- return None
-
- if create:
- uri = cls.create_uri
- content = self._request_server(rest.OP_POST, uri, data=json_body)
- else:
- obj_dict = json.loads(json_body)
- uri = cls.resource_uri_base[resource] + '/'
- uri += obj_dict[resource]['uuid']
- content = self._request_server(rest.OP_PUT, uri, data=json_body)
-
- return json.loads(content)
- #end restore_config
-
- def kv_store(self, key, value):
- # TODO move oper value to common
- json_body = json.dumps({'operation': 'STORE',
- 'key': key,
- 'value': value})
- uri = self._action_uri['useragent-keyvalue']
- self._request_server(rest.OP_POST, uri, data=json_body)
- #end kv_store
-
- def kv_retrieve(self, key=None):
- # if key is None, entire collection is retrieved, use with caution!
- # TODO move oper value to common
- json_body = json.dumps({'operation': 'RETRIEVE',
- 'key': key})
- uri = self._action_uri['useragent-keyvalue']
- content = self._request_server(rest.OP_POST, uri, data=json_body)
-
- return json.loads(content)['value']
- #end kv_retrieve
-
- def kv_delete(self, key):
- # TODO move oper value to common
- json_body = json.dumps({'operation': 'DELETE',
- 'key': key})
- uri = self._action_uri['useragent-keyvalue']
- self._request_server(rest.OP_POST, uri, data=json_body)
- #end kv_delete
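-
- # Illustrative sketch (not part of the original class): the three helpers above
- # map onto STORE / RETRIEVE / DELETE operations against the useragent-keyvalue
- # action URI. The key and value are hypothetical.
- #
- #   >>> api.kv_store('subnet-key', '10.0.0.0/24 vn1')
- #   >>> api.kv_retrieve('subnet-key')    # -> '10.0.0.0/24 vn1'
- #   >>> api.kv_retrieve()                # no key: whole collection, use with care
- #   >>> api.kv_delete('subnet-key')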
-
- # reserve a block of IP addresses from a VN
- # expected format {"subnet" : "2.1.1.0/24", "count" : 4}
- def virtual_network_ip_alloc(self, vnobj, count=1, subnet=None):
- json_body = json.dumps({'count': count, 'subnet': subnet})
- uri = self._action_uri['virtual-network-ip-alloc'] % vnobj.uuid
- content = self._request_server(rest.OP_POST, uri, data=json_body)
- return json.loads(content)['ip_addr']
- #end virtual_network_ip_alloc
-
- # free a previously reserved block of IP addresses from a VN
- # Expected format "subnet" : "2.1.1.0/24",
- # "ip_addr" : ["2.1.1.239", "2.1.1.238"]
- def virtual_network_ip_free(self, vnobj, ip_list, subnet=None):
- json_body = json.dumps({'ip_addr': ip_list, 'subnet': subnet})
- uri = self._action_uri['virtual-network-ip-free'] % vnobj.uuid
- rv = self._request_server(rest.OP_POST, uri, data=json_body)
- return rv
- #end virtual_network_ip_free
-
- # return the number of ip instances allocated from a given VN/subnet
- # Expected format "subnet_list" : ["2.1.1.0/24", "2.2.2.0/24"]
- def virtual_network_subnet_ip_count(self, vnobj, subnet_list):
- json_body = json.dumps({'subnet_list': subnet_list})
- uri = self._action_uri['virtual-network-subnet-ip-count'] % vnobj.uuid
- rv = self._request_server(rest.OP_POST, uri, data=json_body)
- return rv
- #end virtual_network_subnet_ip_count
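-
- # Illustrative sketch (not part of the original class): reserving and releasing
- # addresses on a virtual network. vn_obj is a VirtualNetwork previously read
- # through this client; the subnet values follow the formats documented above.
- #
- #   >>> ips = api.virtual_network_ip_alloc(vn_obj, count=4, subnet='2.1.1.0/24')
- #   >>> ips                                  # e.g. ['2.1.1.239', '2.1.1.238', ...]
- #   >>> api.virtual_network_ip_free(vn_obj, ips, subnet='2.1.1.0/24')
- #   >>> api.virtual_network_subnet_ip_count(vn_obj, ['2.1.1.0/24', '2.2.2.0/24'])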
-
- def get_auth_token(self):
- if self._auth_token:
- return self._auth_token
- self._headers = self._authenticate(headers=self._headers)
- return self._auth_token
-
- #end get_auth_token
-
- def resource_list(self, obj_type, parent_id=None, parent_fq_name=None,
- back_ref_id=None, obj_uuids=None, fields=None,
- detail=False, count=False, filters=None):
- if not obj_type:
- raise ResourceTypeUnknownError(obj_type)
-
- class_name = "%s" % (utils.CamelCase(obj_type))
- obj_class = str_to_class(class_name)
- if not obj_class:
- raise ResourceTypeUnknownError(obj_type)
-
- query_params = {}
- do_post_for_list = False
-
- if parent_fq_name:
- parent_fq_name_str = ':'.join(parent_fq_name)
- query_params['parent_fq_name_str'] = parent_fq_name_str
- elif parent_id:
- if isinstance(parent_id, list):
- query_params['parent_id'] = ','.join(parent_id)
- if len(parent_id) > self.POST_FOR_LIST_THRESHOLD:
- do_post_for_list = True
- else:
- query_params['parent_id'] = parent_id
-
- if back_ref_id:
- if isinstance(back_ref_id, list):
- query_params['back_ref_id'] = ','.join(back_ref_id)
- if len(back_ref_id) > self.POST_FOR_LIST_THRESHOLD:
- do_post_for_list = True
- else:
- query_params['back_ref_id'] = back_ref_id
-
- if obj_uuids:
- comma_sep_obj_uuids = ','.join(u for u in obj_uuids)
- query_params['obj_uuids'] = comma_sep_obj_uuids
- if len(obj_uuids) > self.POST_FOR_LIST_THRESHOLD:
- do_post_for_list = True
-
- if fields:
- comma_sep_fields = ','.join(f for f in fields)
- query_params['fields'] = comma_sep_fields
-
- query_params['detail'] = detail
-
- query_params['count'] = count
-
- if filters:
- query_params['filters'] = ','.join(
- '%s==%s' %(k,json.dumps(v)) for k,v in filters.items())
-
- if do_post_for_list:
- uri = self._action_uri.get('list-bulk-collection')
- if not uri:
- raise
-
- # use same keys as in GET with additional 'type'
- query_params['type'] = obj_type
- json_body = json.dumps(query_params)
- content = self._request_server(rest.OP_POST,
- uri, json_body)
- else: # GET /<collection>
- try:
- content = self._request_server(rest.OP_GET,
- obj_class.create_uri,
- data = query_params)
- except NoIdError:
- # don't allow NoIdError to propagate to the user
- return []
-
- if not detail:
- return json.loads(content)
-
- resource_dicts = json.loads(content)['%ss' %(obj_type)]
- resource_objs = []
- for resource_dict in resource_dicts:
- obj_dict = resource_dict['%s' %(obj_type)]
- resource_obj = obj_class.from_dict(**obj_dict)
- resource_obj.clear_pending_updates()
- resource_obj.set_server_conn(self)
- resource_objs.append(resource_obj)
-
- return resource_objs
- #end resource_list
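-
- # Illustrative sketch (not part of the original class): listing resources.
- # Names and uuid_list are hypothetical.
- #
- #   >>> api.resource_list('virtual-network',
- #   ...                   parent_fq_name=['default-domain', 'demo-project'])
- #   {'virtual-networks': [...]}            # summary dict when detail=False
- #   >>> api.resource_list('virtual-network', obj_uuids=uuid_list, detail=True)
- #   [<VirtualNetwork>, <VirtualNetwork>, ...]
- #
- # When more than POST_FOR_LIST_THRESHOLD (25) uuids, parent ids or back-ref ids
- # are given, the request is issued as a POST to list-bulk-collection instead of
- # a GET on the collection URI.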
-
-#end class VncApi
diff --git a/Testcases/vnc_api/vnc_api.pyc b/Testcases/vnc_api/vnc_api.pyc
deleted file mode 100644
index e017072..0000000
--- a/Testcases/vnc_api/vnc_api.pyc
+++ /dev/null
Binary files differ